Merge branch 'next' into inmem-journal

Raúl Kripalani 2020-07-20 10:38:58 +01:00
commit 4d2d8b2d11
84 changed files with 4902 additions and 4114 deletions


@ -158,7 +158,6 @@ BINS+=lotus-fountain
lotus-chainwatch:
rm -f lotus-chainwatch
go build -o lotus-chainwatch ./cmd/lotus-chainwatch
go run github.com/GeertJohan/go.rice/rice append --exec lotus-chainwatch -i ./cmd/lotus-chainwatch -i ./build
.PHONY: lotus-chainwatch
BINS+=lotus-chainwatch


@ -191,7 +191,7 @@ type FullNode interface {
// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error)
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID int64) error
ClientRemoveImport(ctx context.Context, importID int) error
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
// ClientGetDealInfo returns the latest information about a given deal.
@ -243,8 +243,8 @@ type FullNode interface {
// If the filterOut boolean is set to true, any sectors in the filter are excluded.
// If false, only those sectors in the filter are included.
StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error)
// StateMinerProvingSet returns info about those sectors that a given miner is actively proving.
StateMinerProvingSet(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error)
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error)
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
@ -253,7 +253,9 @@ type FullNode interface {
// StateMinerInfo returns info about the indicated miner
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (MinerInfo, error)
// StateMinerDeadlines returns all the proving deadlines for the given miner
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error)
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error)
// StateMinerPartitions loads miner partitions for the specified miner/deadline
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error)
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
@ -267,7 +269,13 @@ type FullNode interface {
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
// StateSectorGetInfo returns the on-chain info for the specified miner's sector
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
// StateSectorExpiration returns epoch at which given sector will expire
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*SectorExpiration, error)
// StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*SectorLocation, error)
StatePledgeCollateral(context.Context, types.TipSetKey) (types.BigInt, error)
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
@ -327,6 +335,18 @@ type FullNode interface {
// It takes the following params: <multisig address>, <proposed message ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer> <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer> <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer> <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
// MarketFreeBalance
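A hedged usage sketch (illustrative only, not part of this changeset): assuming a connected api.FullNode handle, the new swap methods could be driven end to end as below. The helper name and the hard-coded transaction ID 0 are assumptions; in practice the ID would be read back from the multisig actor state or the propose return value.

package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
)

// swapSigner proposes swapping oldSigner for newSigner and has a second
// signer approve the proposal.
func swapSigner(ctx context.Context, node api.FullNode, msig, proposer, approver, oldSigner, newSigner address.Address) error {
	proposeCid, err := node.MsigSwapPropose(ctx, msig, proposer, oldSigner, newSigner)
	if err != nil {
		return err
	}
	if _, err := node.StateWaitMsg(ctx, proposeCid, build.MessageConfidence); err != nil {
		return err
	}
	// Transaction ID 0 is assumed here purely for illustration.
	approveCid, err := node.MsigSwapApprove(ctx, msig, approver, 0, proposer, oldSigner, newSigner)
	if err != nil {
		return err
	}
	_, err = node.StateWaitMsg(ctx, approveCid, build.MessageConfidence)
	return err
}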
@ -354,17 +374,30 @@ type FileRef struct {
}
type MinerSectors struct {
Sset uint64
Pset uint64
Sectors uint64
Active uint64
}
type SectorExpiration struct {
OnTime abi.ChainEpoch
// non-zero if sector is faulty, epoch at which it will be permanently
// removed if it doesn't recover
Early abi.ChainEpoch
}
type SectorLocation struct {
Deadline uint64
Partition uint64
}
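A hedged sketch (not from this changeset) of how the new StateSectorPartition and StateMinerPartitions calls could be combined to check whether a sector is currently marked faulty; the helper name is illustrative.

package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

func sectorIsFaulty(ctx context.Context, node api.FullNode, maddr address.Address, sn abi.SectorNumber) (bool, error) {
	// Find which deadline/partition the sector currently lives in.
	loc, err := node.StateSectorPartition(ctx, maddr, sn, types.EmptyTSK)
	if err != nil {
		return false, err
	}
	// Load that deadline's partitions and index into the one we need.
	parts, err := node.StateMinerPartitions(ctx, maddr, loc.Deadline, types.EmptyTSK)
	if err != nil {
		return false, err
	}
	return parts[loc.Partition].Faults.IsSet(uint64(sn))
}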
type ImportRes struct {
Root cid.Cid
ImportID int64
ImportID int
}
type Import struct {
Key int64
Key int
Err string
Root *cid.Cid


@ -113,7 +113,7 @@ type FullNodeStruct struct {
ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
ClientRemoveImport func(ctx context.Context, importID int64) error `perm:"admin"`
ClientRemoveImport func(ctx context.Context, importID int) error `perm:"admin"`
ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
@ -127,11 +127,12 @@ type FullNodeStruct struct {
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
StateMinerProvingSet func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
StateMinerActiveSectors func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) `perm:"read"`
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error) `perm:"read"`
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"`
StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"`
StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"`
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"`
@ -139,6 +140,8 @@ type FullNodeStruct struct {
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
StateSectorExpiration func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*api.SectorExpiration, error) `perm:"read"`
StateSectorPartition func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*api.SectorLocation, error) `perm:"read"`
StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
@ -166,6 +169,9 @@ type FullNodeStruct struct {
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
@ -348,7 +354,7 @@ func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, e
return c.Internal.ClientListImports(ctx)
}
func (c *FullNodeStruct) ClientRemoveImport(ctx context.Context, importID int64) error {
func (c *FullNodeStruct) ClientRemoveImport(ctx context.Context, importID int) error {
return c.Internal.ClientRemoveImport(ctx, importID)
}
@ -578,8 +584,8 @@ func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Add
return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk)
}
func (c *FullNodeStruct) StateMinerProvingSet(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
return c.Internal.StateMinerProvingSet(ctx, addr, tsk)
func (c *FullNodeStruct) StateMinerActiveSectors(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
return c.Internal.StateMinerActiveSectors(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
@ -594,10 +600,14 @@ func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Addre
return c.Internal.StateMinerInfo(ctx, actor, tsk)
}
func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) (*miner.Deadlines, error) {
func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]*miner.Deadline, error) {
return c.Internal.StateMinerDeadlines(ctx, m, tsk)
}
func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]*miner.Partition, error) {
return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
}
func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) {
return c.Internal.StateMinerFaults(ctx, actor, tsk)
}
@ -626,6 +636,14 @@ func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.A
return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
}
func (c *FullNodeStruct) StateSectorExpiration(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*api.SectorExpiration, error) {
return c.Internal.StateSectorExpiration(ctx, maddr, n, tsk)
}
func (c *FullNodeStruct) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error) {
return c.Internal.StateSectorPartition(ctx, maddr, sectorNumber, tok)
}
func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
return c.Internal.StateCall(ctx, msg, tsk)
}
@ -726,6 +744,18 @@ func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, t
return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
}
func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
}
func (c *FullNodeStruct) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
return c.Internal.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
}
func (c *FullNodeStruct) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
return c.Internal.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
}
func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt)
}


@ -91,6 +91,8 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
}
func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
*t = PaymentInfo{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -256,6 +258,8 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
}
func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
*t = SealedRef{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -378,6 +382,8 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
}
func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
*t = SealedRefs{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -505,6 +511,8 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
}
func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
*t = SealTicket{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -647,6 +655,8 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
}
func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
*t = SealSeed{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)


@ -83,14 +83,14 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
// Validate upgrade
{
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, 50000, int(si.Expiration))
require.Greater(t, 50000, int(exp.OnTime))
}
{
si, err := client.StateSectorGetInfo(ctx, maddr, Upgraded, types.EmptyTSK)
exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
require.NoError(t, err)
require.Less(t, 50000, int(si.Expiration))
require.Less(t, 50000, int(exp.OnTime))
}
fmt.Println("shutting down mining")


@ -195,6 +195,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
si, err := miner.SectorsStatus(ctx, snum)
require.NoError(t, err)
t.Logf("Sector state: %s", si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}


@ -41,8 +41,9 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) {
func DrandBootstrap() ([]peer.AddrInfo, error) {
addrs := []string{
"/dnsaddr/dev1.drand.sh/",
"/dnsaddr/dev2.drand.sh/",
"/dnsaddr/pl-eu.testnet.drand.sh/",
"/dnsaddr/pl-us.testnet.drand.sh/",
"/dnsaddr/pl-sin.testnet.drand.sh/",
}
return addrutil.ParseAddresses(context.TODO(), addrs)
}


@ -62,7 +62,7 @@ const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
// Devnet settings
const TotalFilecoin = uint64(2_000_000_000)
const MiningRewardTotal = uint64(1_400_000_000)
const MiningRewardTotal = uint64(1_900_000_000)
const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
@ -95,8 +95,9 @@ const BlockGasLimit = 7_500_000_000
var DrandConfig = dtypes.DrandConfig{
Servers: []string{
"https://dev1.drand.sh",
"https://dev1.drand.sh",
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
"https://pl-sin.testnet.drand.sh",
},
ChainInfoJSON: `{"public_key":"88fdb6f22fcbe671bf91befbf723e159e5934f785168b437c03424cde6361cff5f5d3034390260f210438946f21d867d","period":30,"genesis_time":1589461830,"hash":"e89c9efe5af86ac79fc5d1c0ee0aaa64a81a97bb55d0acc4d2497cc2a0087afe","groupHash":"8f16f0105250b51f34e41fb845d09668b2e3db008dacb3c2d461f0bb2349b854"}`,
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
}


@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
}
// APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 7, 0)
var APIVersion Version = newVer(0, 8, 0)
//nolint:varcheck,deadcode
const (


@ -7,7 +7,7 @@ import (
"io"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
@ -57,6 +57,8 @@ func (t *BlockSyncRequest) MarshalCBOR(w io.Writer) error {
}
func (t *BlockSyncRequest) UnmarshalCBOR(r io.Reader) error {
*t = BlockSyncRequest{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -179,6 +181,8 @@ func (t *BlockSyncResponse) MarshalCBOR(w io.Writer) error {
}
func (t *BlockSyncResponse) UnmarshalCBOR(r io.Reader) error {
*t = BlockSyncResponse{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -354,6 +358,8 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error {
}
func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
*t = BSTipSet{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)


@ -53,3 +53,54 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
return out.Add(uint64(i), curVal)
})
}
// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104
// CBOR Marshaling will likely be the largest performance bottleneck here.
// AdtMapDiff generalizes adt.Map diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct
// in an interface implementation.
// AsKey should return the Keyer implementation specific to the map
// Add should be called when a new k,v is added to the map
// Modify should be called when a value is modified in the map
// Remove should be called when a value is removed from the map
type AdtMapDiff interface {
AsKey(key string) (adt.Keyer, error)
Add(key string, val *typegen.Deferred) error
Modify(key string, from, to *typegen.Deferred) error
Remove(key string, val *typegen.Deferred) error
}
func DiffAdtMap(preMap, curMap *adt.Map, out AdtMapDiff) error {
prevVal := new(typegen.Deferred)
if err := preMap.ForEach(prevVal, func(key string) error {
curVal := new(typegen.Deferred)
k, err := out.AsKey(key)
if err != nil {
return err
}
found, err := curMap.Get(k, curVal)
if err != nil {
return err
}
if !found {
if err := out.Remove(key, prevVal); err != nil {
return err
}
return nil
}
if err := out.Modify(key, prevVal, curVal); err != nil {
return err
}
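// Delete the matched key from curMap so the final ForEach below only visits keys that were newly added (this mutates the in-memory curMap).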
return curMap.Delete(k)
}); err != nil {
return err
}
curVal := new(typegen.Deferred)
return curMap.ForEach(curVal, func(key string) error {
return out.Add(key, curVal)
})
}


@ -3,7 +3,6 @@ package state
import (
"bytes"
"context"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@ -419,3 +418,78 @@ func (sp *StatePredicates) OnMinerSectorChange() DiffMinerActorStateFunc {
return true, sectorChanges, nil
}
}
type MinerPreCommitChanges struct {
Added []miner.SectorPreCommitOnChainInfo
Removed []miner.SectorPreCommitOnChainInfo
}
func (m *MinerPreCommitChanges) AsKey(key string) (adt.Keyer, error) {
sector, err := adt.ParseUIntKey(key)
if err != nil {
return nil, err
}
return miner.SectorKey(abi.SectorNumber(sector)), nil
}
func (m *MinerPreCommitChanges) Add(key string, val *typegen.Deferred) error {
sp := new(miner.SectorPreCommitOnChainInfo)
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return err
}
m.Added = append(m.Added, *sp)
return nil
}
func (m *MinerPreCommitChanges) Modify(key string, from, to *typegen.Deferred) error {
return nil
}
func (m *MinerPreCommitChanges) Remove(key string, val *typegen.Deferred) error {
sp := new(miner.SectorPreCommitOnChainInfo)
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return err
}
m.Removed = append(m.Removed, *sp)
return nil
}
func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc {
return func(ctx context.Context, oldState, newState *miner.State) (changed bool, user UserData, err error) {
ctxStore := &contextStore{
ctx: ctx,
cst: sp.cst,
}
precommitChanges := &MinerPreCommitChanges{
Added: []miner.SectorPreCommitOnChainInfo{},
Removed: []miner.SectorPreCommitOnChainInfo{},
}
if oldState.PreCommittedSectors.Equals(newState.PreCommittedSectors) {
return false, nil, nil
}
oldPrecommits, err := adt.AsMap(ctxStore, oldState.PreCommittedSectors)
if err != nil {
return false, nil, err
}
newPrecommits, err := adt.AsMap(ctxStore, newState.PreCommittedSectors)
if err != nil {
return false, nil, err
}
if err := DiffAdtMap(oldPrecommits, newPrecommits, precommitChanges); err != nil {
return false, nil, err
}
if len(precommitChanges.Added)+len(precommitChanges.Removed) == 0 {
return false, nil, nil
}
return true, precommitChanges, nil
}
}


@ -436,7 +436,16 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store *cbornode.Ba
emptyMap, err := store.Put(context.TODO(), hamt.NewNode(store, hamt.UseTreeBitWidth(5)))
require.NoError(t, err)
emptyDeadlines := miner.ConstructDeadlines()
emptyDeadline, err := store.Put(context.TODO(), &miner.Deadline{
Partitions: emptyArrayCid,
ExpirationsEpochs: emptyArrayCid,
PostSubmissions: abi.NewBitField(),
EarlyTerminations: abi.NewBitField(),
LiveSectors: 0,
})
require.NoError(t, err)
emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
emptyDeadlinesCid, err := store.Put(context.Background(), emptyDeadlines)
require.NoError(t, err)


@ -9,7 +9,6 @@ import (
"time"
"github.com/filecoin-project/go-address"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
@ -178,12 +177,12 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
Accounts: []genesis.Actor{
{
Type: genesis.TAccount,
Balance: types.FromFil(40_000_000),
Balance: types.FromFil(20_000_000),
Meta: (&genesis.AccountMeta{Owner: mk1}).ActorMeta(),
},
{
Type: genesis.TAccount,
Balance: types.FromFil(40_000_000),
Balance: types.FromFil(20_000_000),
Meta: (&genesis.AccountMeta{Owner: mk2}).ActorMeta(),
},
{
@ -285,7 +284,7 @@ func (cg *ChainGen) GenesisCar() ([]byte, error) {
func CarWalkFunc(nd format.Node) (out []*format.Link, err error) {
for _, link := range nd.Links() {
if link.Cid.Prefix().MhType == uint64(commcid.FC_SEALED_V1) || link.Cid.Prefix().MhType == uint64(commcid.FC_UNSEALED_V1) {
if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
continue
}
out = append(out, link)
@ -439,7 +438,8 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
// ResyncBankerNonce is used for dealing with messages made when
// simulating forks
func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error {
act, err := cg.sm.GetActor(cg.banker, ts)
var act types.Actor
err := cg.sm.WithParentState(ts, cg.sm.WithActor(cg.banker, stmgr.GetActor(&act)))
if err != nil {
return err
}


@ -8,8 +8,10 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/account"
"github.com/filecoin-project/specs-actors/actors/builtin/multisig"
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/filecoin-project/specs-actors/actors/runtime"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
bstore "github.com/ipfs/go-ipfs-blockstore"
@ -196,8 +198,8 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
// Create accounts
for id, info := range template.Accounts {
if info.Type != genesis.TAccount {
return nil, xerrors.New("unsupported account type") // TODO: msigs
if info.Type != genesis.TAccount && info.Type != genesis.TMultisig {
return nil, xerrors.New("unsupported account type")
}
ida, err := address.NewIDAddress(uint64(AccountStart + id))
@ -205,24 +207,57 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, err
}
var ainfo genesis.AccountMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return nil, xerrors.Errorf("unmarshaling account meta: %w", err)
// var newAddress address.Address
if (info.Type == genesis.TAccount) {
var ainfo genesis.AccountMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner})
if err != nil {
return nil, err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.AccountActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return nil, xerrors.Errorf("setting account from actmap: %w", err)
}
} else if (info.Type == genesis.TMultisig) {
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
pending, err := adt.MakeEmptyMap(adt.WrapStore(ctx, cst)).Root()
if err != nil {
return nil, xerrors.Errorf("failed to create empty map: %v", err)
}
st, err := cst.Put(ctx, &multisig.State{
Signers: ainfo.Signers,
NumApprovalsThreshold: uint64(ainfo.Threshold),
StartEpoch: abi.ChainEpoch(ainfo.VestingStart),
UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration),
PendingTxns: pending,
InitialBalance: info.Balance,
})
if err != nil {
return nil, err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.MultisigActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return nil, xerrors.Errorf("setting account from actmap: %w", err)
}
}
st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner})
if err != nil {
return nil, err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.AccountActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return nil, xerrors.Errorf("setting account from actmap: %w", err)
}
}
vregroot, err := address.NewIDAddress(80)
@ -288,7 +323,12 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
}
}
return vm.Flush(ctx)
st, err := vm.Flush(ctx)
if err != nil {
return cid.Cid{}, xerrors.Errorf("vm flush: %w", err)
}
return st, nil
}
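A hedged illustration (not from this changeset) of the genesis template entry that the new TMultisig branch above would consume; field names follow genesis.MultisigMeta as referenced in that branch, and the concrete values and helper name are placeholders.

package example

import (
	"encoding/json"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/genesis"
)

// addGenesisMultisig appends a 2-of-N multisig account to a genesis template.
func addGenesisMultisig(template *genesis.Template, signers []address.Address) error {
	meta, err := json.Marshal(genesis.MultisigMeta{
		Signers:         signers,
		Threshold:       2,
		VestingStart:    0,
		VestingDuration: 0, // no vesting
	})
	if err != nil {
		return err
	}
	template.Accounts = append(template.Accounts, genesis.Actor{
		Type:    genesis.TMultisig,
		Balance: types.FromFil(1_000_000),
		Meta:    meta,
	})
	return nil
}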
func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys runtime.Syscalls, template genesis.Template) (*GenesisBootstrap, error) {


@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime"
@ -188,6 +189,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
st.TotalQualityAdjPower = qaPow
st.TotalRawBytePower = rawPow
st.ThisEpochQualityAdjPower = qaPow
st.ThisEpochRawBytePower = rawPow
return nil
})
if err != nil {
@ -235,8 +239,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting current total power: %w", err)
}
pledge := miner.InitialPledgeForPower(sectorWeight, tpow.QualityAdjPower, tpow.PledgeCollateral, epochReward, circSupply(ctx, vm, minerInfos[i].maddr))
pledge := miner.InitialPledgeForPower(sectorWeight, tpow.QualityAdjPower, epochReward.ThisEpochBaselinePower, tpow.PledgeCollateral, epochReward.ThisEpochReward, circSupply(ctx, vm, minerInfos[i].maddr))
fmt.Println(types.FIL(pledge))
_, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin.MethodsMiner.PreCommitSector, mustEnc(params))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
@ -327,18 +331,18 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [
return dealWeights, nil
}
func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (abi.TokenAmount, error) {
func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address) (*reward.ThisEpochRewardReturn, error) {
rwret, err := doExecValue(ctx, vm, builtin.RewardActorAddr, maddr, big.Zero(), builtin.MethodsReward.ThisEpochReward, nil)
if err != nil {
return abi.TokenAmount{}, err
return nil, err
}
epochReward := abi.NewTokenAmount(0)
var epochReward reward.ThisEpochRewardReturn
if err := epochReward.UnmarshalCBOR(bytes.NewReader(rwret)); err != nil {
return abi.TokenAmount{}, err
return nil, err
}
return epochReward, nil
return &epochReward, nil
}
func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount {


@ -30,6 +30,10 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
amap := hamt.NewNode(cst, hamt.UseTreeBitWidth(5)) // TODO: use spec adt map
for i, a := range initialActors {
if a.Type == genesis.TMultisig {
continue
}
if a.Type != genesis.TAccount {
return nil, xerrors.Errorf("unsupported account type: %s", a.Type) // TODO: Support msig (skip here)
}


@ -16,7 +16,7 @@ func SetupRewardActor(bs bstore.Blockstore) (*types.Actor, error) {
cst := cbor.NewCborStore(bs)
z := big.Zero()
st := reward.ConstructState(&z)
st := reward.ConstructState(z)
st.ThisEpochReward = types.FromFil(100)
hcid, err := cst.Put(context.TODO(), st)


@ -31,7 +31,7 @@ func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) {
MinerCount: 0,
MinerAboveMinPowerCount: 0,
CronEventQueue: emptyhamt,
LastEpochTick: 0,
FirstCronEpoch: 0,
Claims: emptyhamt,
ProofValidationBatch: nil,
}


@ -165,7 +165,8 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
}
func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
return mpp.sm.GetActor(addr, ts)
var act types.Actor
return &act, mpp.sm.WithParentState(ts, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
}
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {


@ -10,6 +10,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -25,7 +26,7 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate
}
if msg.GasLimit == 0 {
msg.GasLimit = 10000000000
msg.GasLimit = build.BlockGasLimit
}
if msg.GasPrice == types.EmptyInt {
msg.GasPrice = types.NewInt(0)

chain/stmgr/read.go (new file, 155 lines)

@ -0,0 +1,155 @@
package stmgr
import (
"context"
"reflect"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
type StateTreeCB func(state *state.StateTree) error
func (sm *StateManager) WithParentStateTsk(tsk types.TipSetKey, cb StateTreeCB) error {
ts, err := sm.cs.GetTipSetFromKey(tsk)
if err != nil {
return xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return xerrors.Errorf("load state tree: %w", err)
}
return cb(state)
}
func (sm *StateManager) WithParentState(ts *types.TipSet, cb StateTreeCB) error {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return xerrors.Errorf("load state tree: %w", err)
}
return cb(state)
}
func (sm *StateManager) WithStateTree(st cid.Cid, cb StateTreeCB) error {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("load state tree: %w", err)
}
return cb(state)
}
type ActorCB func(act *types.Actor) error
func GetActor(out *types.Actor) ActorCB {
return func(act *types.Actor) error {
*out = *act
return nil
}
}
func (sm *StateManager) WithActor(addr address.Address, cb ActorCB) StateTreeCB {
return func(state *state.StateTree) error {
act, err := state.GetActor(addr)
if err != nil {
return xerrors.Errorf("get actor: %w", err)
}
return cb(act)
}
}
// WithActorState usage:
// Option 1: WithActorState(ctx, idAddr, func(store adt.Store, st *ActorStateType) error {...})
// Option 2: WithActorState(ctx, idAddr, actorStatePtr)
func (sm *StateManager) WithActorState(ctx context.Context, out interface{}) ActorCB {
return func(act *types.Actor) error {
store := sm.cs.Store(ctx)
outCallback := reflect.TypeOf(out).Kind() == reflect.Func
var st reflect.Value
if outCallback {
st = reflect.New(reflect.TypeOf(out).In(1).Elem())
} else {
st = reflect.ValueOf(out)
}
if err := store.Get(ctx, act.Head, st.Interface()); err != nil {
return xerrors.Errorf("read actor head: %w", err)
}
if outCallback {
out := reflect.ValueOf(out).Call([]reflect.Value{reflect.ValueOf(store), st})
if !out[0].IsNil() && out[0].Interface().(error) != nil {
return out[0].Interface().(error)
}
}
return nil
}
}
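A hedged sketch (not part of this file) of the two calling modes described in the comment above: passing a state pointer directly, or passing a callback that also receives the adt.Store. The function and variable names are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/builtin/power"
	"github.com/filecoin-project/specs-actors/actors/util/adt"
	"github.com/ipfs/go-cid"
)

func inspectState(ctx context.Context, sm *stmgr.StateManager, stRoot cid.Cid, maddr address.Address) error {
	// Option 2: decode the actor head straight into a concrete state struct.
	var pst power.State
	if err := sm.WithStateTree(stRoot, sm.WithActor(builtin.StoragePowerActorAddr, sm.WithActorState(ctx, &pst))); err != nil {
		return err
	}

	// Option 1: receive the decoded state together with an adt.Store, which
	// allows follow-up loads such as GetInfo.
	return sm.WithStateTree(stRoot, sm.WithActor(maddr, sm.WithActorState(ctx, func(store adt.Store, mst *miner.State) error {
		info, err := mst.GetInfo(store)
		if err != nil {
			return err
		}
		fmt.Printf("miners: %d, sector size of %s: %d\n", pst.MinerCount, maddr, info.SectorSize)
		return nil
	})))
}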
type DeadlinesCB func(store adt.Store, deadlines *miner.Deadlines) error
func (sm *StateManager) WithDeadlines(cb DeadlinesCB) func(store adt.Store, mas *miner.State) error {
return func(store adt.Store, mas *miner.State) error {
deadlines, err := mas.LoadDeadlines(store)
if err != nil {
return err
}
return cb(store, deadlines)
}
}
type DeadlineCB func(store adt.Store, idx uint64, deadline *miner.Deadline) error
func (sm *StateManager) WithDeadline(idx uint64, cb DeadlineCB) DeadlinesCB {
return func(store adt.Store, deadlines *miner.Deadlines) error {
d, err := deadlines.LoadDeadline(store, idx)
if err != nil {
return err
}
return cb(store, idx, d)
}
}
func (sm *StateManager) WithEachDeadline(cb DeadlineCB) DeadlinesCB {
return func(store adt.Store, deadlines *miner.Deadlines) error {
return deadlines.ForEach(store, func(dlIdx uint64, dl *miner.Deadline) error {
return cb(store, dlIdx, dl)
})
}
}
type PartitionCB func(store adt.Store, idx uint64, partition *miner.Partition) error
func (sm *StateManager) WithEachPartition(cb PartitionCB) DeadlineCB {
return func(store adt.Store, idx uint64, deadline *miner.Deadline) error {
parts, err := deadline.PartitionsArray(store)
if err != nil {
return err
}
var partition miner.Partition
return parts.ForEach(&partition, func(i int64) error {
p := partition
return cb(store, uint64(i), &p)
})
}
}
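A hedged sketch (illustrative, not part of this file) of how these helpers compose: totalling all sectors across every partition of every deadline for a miner, evaluated at the parent state of a tipset.

package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/util/adt"
)

func countMinerSectors(ctx context.Context, sm *stmgr.StateManager, maddr address.Address, ts *types.TipSet) (uint64, error) {
	var total uint64
	err := sm.WithParentState(ts,
		sm.WithActor(maddr,
			sm.WithActorState(ctx,
				sm.WithDeadlines(
					sm.WithEachDeadline(
						sm.WithEachPartition(func(store adt.Store, idx uint64, part *miner.Partition) error {
							// Count every sector registered in this partition.
							n, err := part.Sectors.Count()
							if err != nil {
								return err
							}
							total += n
							return nil
						}))))))
	return total, err
}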


@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
amt "github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
@ -236,7 +237,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, pstate cid.Cid, bms []B
Nonce: ca.Nonce,
Value: types.NewInt(0),
GasPrice: types.NewInt(0),
GasLimit: 1 << 30, // Make super sure this is never too little
GasLimit: build.BlockGasLimit * 10, // Make super sure this is never too little
Method: builtin.MethodsCron.EpochTick,
Params: nil,
}
@ -337,74 +338,10 @@ func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
return ts.ParentState()
}
func (sm *StateManager) GetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return nil, xerrors.Errorf("load state tree: %w", err)
}
return state.GetActor(addr)
}
func (sm *StateManager) getActorRaw(addr address.Address, st cid.Cid) (*types.Actor, error) {
cst := cbor.NewCborStore(sm.cs.Blockstore())
state, err := state.LoadStateTree(cst, st)
if err != nil {
return nil, xerrors.Errorf("load state tree: %w", err)
}
return state.GetActor(addr)
}
func (sm *StateManager) GetBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
act, err := sm.GetActor(addr, ts)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
return types.NewInt(0), nil
}
return types.EmptyInt, xerrors.Errorf("get actor: %w", err)
}
return act.Balance, nil
}
func (sm *StateManager) ChainStore() *store.ChainStore {
return sm.cs
}
func (sm *StateManager) LoadActorState(ctx context.Context, a address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) {
act, err := sm.GetActor(a, ts)
if err != nil {
return nil, err
}
cst := cbor.NewCborStore(sm.cs.Blockstore())
if err := cst.Get(ctx, act.Head, out); err != nil {
var r cbg.Deferred
_ = cst.Get(ctx, act.Head, &r)
log.Errorw("bad actor head", "error", err, "raw", r.Raw, "address", a)
return nil, err
}
return act, nil
}
func (sm *StateManager) LoadActorStateRaw(ctx context.Context, a address.Address, out interface{}, st cid.Cid) (*types.Actor, error) {
act, err := sm.getActorRaw(a, st)
if err != nil {
return nil, err
}
cst := cbor.NewCborStore(sm.cs.Blockstore())
if err := cst.Get(ctx, act.Head, out); err != nil {
return nil, err
}
return act, nil
}
// ResolveToKeyAddress is similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses.
// Uses the `TipSet` `ts` to generate the VM state.
func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
@ -636,7 +573,8 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
default:
}
act, err := sm.GetActor(m.VMMessage().From, cur)
var act types.Actor
err := sm.WithParentState(cur, sm.WithActor(m.VMMessage().From, GetActor(&act)))
if err != nil {
return nil, nil, err
}
@ -759,7 +697,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
return api.MarketBalance{}, err
}
if ehas {
out.Escrow, err = et.Get(addr)
out.Escrow, _, err = et.Get(addr)
if err != nil {
return api.MarketBalance{}, xerrors.Errorf("getting escrow balance: %w", err)
}
@ -776,7 +714,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
return api.MarketBalance{}, err
}
if lhas {
out.Locked, err = lt.Get(addr)
out.Locked, _, err = lt.Get(addr)
if err != nil {
return api.MarketBalance{}, xerrors.Errorf("getting locked balance: %w", err)
}


@ -44,14 +44,38 @@ import (
func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) {
var state init_.State
_, err := sm.LoadActorStateRaw(ctx, builtin.InitActorAddr, &state, st)
err := sm.WithStateTree(st, sm.WithActor(builtin.InitActorAddr, sm.WithActorState(ctx, &state)))
if err != nil {
return "", xerrors.Errorf("(get sset) failed to load init actor state: %w", err)
return "", err
}
return dtypes.NetworkName(state.NetworkName), nil
}
func (sm *StateManager) LoadActorState(ctx context.Context, addr address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) {
var a *types.Actor
if err := sm.WithParentState(ts, sm.WithActor(addr, func(act *types.Actor) error {
a = act
return sm.WithActorState(ctx, out)(act)
})); err != nil {
return nil, err
}
return a, nil
}
func (sm *StateManager) LoadActorStateRaw(ctx context.Context, addr address.Address, out interface{}, st cid.Cid) (*types.Actor, error) {
var a *types.Actor
if err := sm.WithStateTree(st, sm.WithActor(addr, func(act *types.Actor) error {
a = act
return sm.WithActorState(ctx, out)(act)
})); err != nil {
return nil, err
}
return a, nil
}
func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) {
var mas miner.State
_, err := sm.LoadActorStateRaw(ctx, maddr, &mas, st)
@ -105,35 +129,6 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres
}, nil
}
func SectorSetSizes(ctx context.Context, sm *StateManager, maddr address.Address, ts *types.TipSet) (api.MinerSectors, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return api.MinerSectors{}, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
notProving, err := bitfield.MultiMerge(mas.Faults, mas.Recoveries)
if err != nil {
return api.MinerSectors{}, err
}
npc, err := notProving.Count()
if err != nil {
return api.MinerSectors{}, err
}
blks := cbor.NewCborStore(sm.ChainStore().Blockstore())
ss, err := amt.LoadAMT(ctx, blks, mas.Sectors)
if err != nil {
return api.MinerSectors{}, err
}
return api.MinerSectors{
Sset: ss.Count,
Pset: ss.Count - npc,
}, nil
}
func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (miner.SectorPreCommitOnChainInfo, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
@ -181,31 +176,51 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet,
}
func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) {
var mas miner.State
_, err := sm.LoadActorStateRaw(ctx, maddr, &mas, st)
var partsProving []*abi.BitField
var mas *miner.State
var info *miner.MinerInfo
err := sm.WithStateTree(st, sm.WithActor(maddr, sm.WithActorState(ctx, func(store adt.Store, mst *miner.State) error {
var err error
mas = mst
info, err = mas.GetInfo(store)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
deadlines, err := mas.LoadDeadlines(store)
if err != nil {
return xerrors.Errorf("loading deadlines: %w", err)
}
return deadlines.ForEach(store, func(dlIdx uint64, deadline *miner.Deadline) error {
partitions, err := deadline.PartitionsArray(store)
if err != nil {
return xerrors.Errorf("getting partition array: %w", err)
}
var partition miner.Partition
return partitions.ForEach(&partition, func(partIdx int64) error {
p, err := bitfield.SubtractBitField(partition.Sectors, partition.Faults)
if err != nil {
return xerrors.Errorf("subtract faults from partition sectors: %w", err)
}
partsProving = append(partsProving, p)
return nil
})
})
})))
if err != nil {
return nil, xerrors.Errorf("(get sectors) failed to load miner actor state: %w", err)
return nil, err
}
cst := cbor.NewCborStore(sm.cs.Blockstore())
var deadlines miner.Deadlines
if err := cst.Get(ctx, mas.Deadlines, &deadlines); err != nil {
return nil, xerrors.Errorf("failed to load deadlines: %w", err)
}
notProving, err := bitfield.MultiMerge(mas.Faults, mas.Recoveries)
provingSectors, err := bitfield.MultiMerge(partsProving...)
if err != nil {
return nil, xerrors.Errorf("failed to union faults and recoveries: %w", err)
}
allSectors, err := bitfield.MultiMerge(append(deadlines.Due[:], mas.NewSectors)...)
if err != nil {
return nil, xerrors.Errorf("merging deadline bitfields failed: %w", err)
}
provingSectors, err := bitfield.SubtractBitField(allSectors, notProving)
if err != nil {
return nil, xerrors.Errorf("failed to subtract non-proving sectors from set: %w", err)
return nil, xerrors.Errorf("merge partition proving sets: %w", err)
}
numProvSect, err := provingSectors.Count()
@ -218,11 +233,6 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, nil
}
info, err := mas.GetInfo(sm.cs.Store(ctx))
if err != nil {
return nil, err
}
spt, err := ffiwrapper.SealProofTypeFromSectorSize(info.SectorSize)
if err != nil {
return nil, xerrors.Errorf("getting seal proof type: %w", err)
@ -248,7 +258,7 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("failed to enumerate all sector IDs: %w", err)
}
sectorAmt, err := amt.LoadAMT(ctx, cst, mas.Sectors)
sectorAmt, err := amt.LoadAMT(ctx, sm.cs.Store(ctx), mas.Sectors)
if err != nil {
return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
}
@ -313,36 +323,6 @@ func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, ma
return false, nil
}
func GetMinerDeadlines(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.Deadlines, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return nil, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err)
}
return mas.LoadDeadlines(sm.cs.Store(ctx))
}
func GetMinerFaults(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*abi.BitField, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return nil, xerrors.Errorf("(get faults) failed to load miner actor state: %w", err)
}
return mas.Faults, nil
}
func GetMinerRecoveries(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*abi.BitField, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return nil, xerrors.Errorf("(get recoveries) failed to load miner actor state: %w", err)
}
return mas.Recoveries, nil
}
func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) {
var state market.State
if _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil {
@ -484,20 +464,6 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
return root, trace, nil
}
func GetProvingSetRaw(ctx context.Context, sm *StateManager, mas miner.State) ([]*api.ChainSectorInfo, error) {
notProving, err := bitfield.MultiMerge(mas.Faults, mas.Recoveries)
if err != nil {
return nil, err
}
provset, err := LoadSectorsFromSet(ctx, sm.cs.Blockstore(), mas.Sectors, notProving, true)
if err != nil {
return nil, xerrors.Errorf("failed to get proving set: %w", err)
}
return provset, nil
}
func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) {
var lbr abi.ChainEpoch
if round > build.WinningPoStSectorSetLookback {
@ -675,9 +641,9 @@ func init() {
}
func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
act, err := sm.GetActor(to, ts)
if err != nil {
return nil, err
var act types.Actor
if err := sm.WithParentState(ts, sm.WithActor(to, GetActor(&act))); err != nil {
return nil, xerrors.Errorf("getting actor: %w", err)
}
m := MethodsMap[act.Code][method]


@ -1043,21 +1043,25 @@ func recurseLinks(bs blockstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.C
return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err)
}
top, err := cbg.ScanForLinks(bytes.NewReader(data.RawData()))
var rerr error
err = cbg.ScanForLinks(bytes.NewReader(data.RawData()), func(c cid.Cid) {
if rerr != nil {
// No error return on ScanForLinks :(
return
}
in = append(in, c)
var err error
in, err = recurseLinks(bs, c, in)
if err != nil {
rerr = err
}
})
if err != nil {
return nil, xerrors.Errorf("scanning for links failed: %w", err)
}
in = append(in, top...)
for _, c := range top {
var err error
in, err = recurseLinks(bs, c, in)
if err != nil {
return nil, err
}
}
return in, nil
return in, rerr
}
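Because the cbg.ScanForLinks callback cannot return an error, the rewrite above captures the first failure in a closed-over variable and checks it once the scan returns. A minimal standalone illustration of that pattern (toy walker, not cbg itself):

package main

import (
	"errors"
	"fmt"
)

// walk mimics an API like cbg.ScanForLinks: the per-item callback has no
// error return of its own.
func walk(items []int, cb func(int)) error {
	for _, it := range items {
		cb(it)
	}
	return nil
}

func main() {
	var cbErr error // first callback failure, captured by the closure
	err := walk([]int{1, 2, 3}, func(n int) {
		if cbErr != nil {
			return // an earlier item already failed; skip the rest
		}
		if n == 2 {
			cbErr = errors.New("boom on 2")
		}
	})
	if err == nil {
		err = cbErr
	}
	fmt.Println(err) // boom on 2
}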
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer) error {


@ -9,7 +9,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/ipfs/go-cid"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
@ -146,6 +146,8 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error {
}
func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
*t = BlockHeader{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -470,6 +472,8 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error {
}
func (t *Ticket) UnmarshalCBOR(r io.Reader) error {
*t = Ticket{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -545,6 +549,8 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error {
}
func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error {
*t = ElectionProof{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -688,6 +694,8 @@ func (t *Message) MarshalCBOR(w io.Writer) error {
}
func (t *Message) UnmarshalCBOR(r io.Reader) error {
*t = Message{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -861,6 +869,8 @@ func (t *SignedMessage) MarshalCBOR(w io.Writer) error {
}
func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error {
*t = SignedMessage{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -926,6 +936,8 @@ func (t *MsgMeta) MarshalCBOR(w io.Writer) error {
}
func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error {
*t = MsgMeta{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -1007,6 +1019,8 @@ func (t *Actor) MarshalCBOR(w io.Writer) error {
}
func (t *Actor) UnmarshalCBOR(r io.Reader) error {
*t = Actor{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -1123,6 +1137,8 @@ func (t *MessageReceipt) MarshalCBOR(w io.Writer) error {
}
func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error {
*t = MessageReceipt{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -1257,6 +1273,8 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error {
}
func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error {
*t = BlockMsg{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -1407,6 +1425,8 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error {
}
func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error {
*t = ExpTipSet{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -1542,6 +1562,8 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error {
}
func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error {
*t = BeaconEntry{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)


@ -550,7 +550,8 @@ func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError
rt.lastGasChargeTime = now
rt.lastGasCharge = &gasTrace
if rt.gasUsed+toUse > rt.gasAvailable {
// overflow safe
if rt.gasUsed > rt.gasAvailable-toUse {
rt.gasUsed = rt.gasAvailable
return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d",
rt.gasUsed, rt.gasAvailable)
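The rearranged comparison matters because rt.gasUsed+toUse can overflow int64, in which case the naive check silently passes. A small standalone demonstration with arbitrary values:

package main

import (
	"fmt"
	"math"
)

func main() {
	gasUsed := int64(math.MaxInt64 - 10)
	gasAvailable := int64(math.MaxInt64)
	toUse := int64(100)

	// Naive check: gasUsed+toUse wraps around to a negative number, so the
	// overspend is NOT detected.
	fmt.Println("naive check fires:     ", gasUsed+toUse > gasAvailable) // false

	// Rearranged check from this hunk: no addition, so no overflow.
	fmt.Println("rearranged check fires:", gasUsed > gasAvailable-toUse) // true
}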


@ -20,7 +20,6 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/account"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
@ -535,12 +534,16 @@ func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interfac
return nil
}
func linksForObj(blk block.Block) ([]cid.Cid, error) {
func linksForObj(blk block.Block, cb func(cid.Cid)) error {
switch blk.Cid().Prefix().Codec {
case cid.DagCBOR:
return cbg.ScanForLinks(bytes.NewReader(blk.RawData()))
err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb)
if err != nil {
return xerrors.Errorf("cbg.ScanForLinks: %w", err)
}
return nil
default:
return nil, xerrors.Errorf("vm flush copy method only supports dag cbor")
return xerrors.Errorf("vm flush copy method only supports dag cbor")
}
}
@ -558,7 +561,7 @@ func Copy(from, to blockstore.Blockstore, root cid.Cid) error {
}
if err := copyRec(from, to, root, batchCp); err != nil {
return err
return xerrors.Errorf("copyRec: %w", err)
}
if len(batch) > 0 {
@ -581,31 +584,40 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block)
return xerrors.Errorf("get %s failed: %w", root, err)
}
links, err := linksForObj(blk)
if err != nil {
return err
}
var lerr error
err = linksForObj(blk, func(link cid.Cid) {
if lerr != nil {
// There's no error return on the linksForObj callback :(
return
}
for _, link := range links {
if link.Prefix().MhType == mh.IDENTITY || link.Prefix().MhType == uint64(commcid.FC_SEALED_V1) || link.Prefix().MhType == uint64(commcid.FC_UNSEALED_V1) {
continue
if link.Prefix().MhType == mh.IDENTITY || link.Prefix().Codec == cid.FilCommitmentSealed || link.Prefix().Codec == cid.FilCommitmentUnsealed {
return
}
has, err := to.Has(link)
if err != nil {
return err
lerr = xerrors.Errorf("has: %w", err)
return
}
if has {
continue
return
}
if err := copyRec(from, to, link, cp); err != nil {
return err
lerr = err
return
}
})
if err != nil {
return xerrors.Errorf("linksForObj (%x): %w", blk.RawData(), err)
}
if lerr != nil {
return lerr
}
if err := cp(blk); err != nil {
return err
return xerrors.Errorf("copy: %w", err)
}
return nil
}
@ -686,7 +698,7 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor
}
if err := deductFunds(f, amt); err != nil {
return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds: %s", err)
return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err)
}
depositFunds(t, amt)


@ -140,14 +140,14 @@ var clientDropCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
var ids []int64
var ids []int
for i, s := range cctx.Args().Slice() {
id, err := strconv.ParseInt(s, 10, 64)
id, err := strconv.ParseInt(s, 10, 0)
if err != nil {
return xerrors.Errorf("parsing %d-th import ID: %w", i, err)
}
ids = append(ids, id)
ids = append(ids, int(id))
}
for _, id := range ids {


@ -6,12 +6,13 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
"github.com/filecoin-project/specs-actors/actors/abi"
"os"
"sort"
"strconv"
"text/tabwriter"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/go-address"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
samsig "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
@ -31,17 +32,14 @@ import (
var multisigCmd = &cli.Command{
Name: "msig",
Usage: "Interact with a multisig wallet",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "source",
Usage: "specify the account to send propose from",
},
},
Subcommands: []*cli.Command{
msigCreateCmd,
msigInspectCmd,
msigProposeCmd,
msigApproveCmd,
msigSwapProposeCmd,
msigSwapApproveCmd,
msigSwapCancelCmd,
},
}
@ -187,12 +185,19 @@ var msigInspectCmd = &cli.Command{
return err
}
head, err := api.ChainHead(ctx)
if err != nil {
return err
}
var mstate samsig.State
if err := mstate.UnmarshalCBOR(bytes.NewReader(obj)); err != nil {
return err
}
fmt.Printf("Balance: %sfil\n", types.FIL(act.Balance))
locked := mstate.AmountLocked(head.Height() - mstate.StartEpoch)
fmt.Printf("Balance: %s\n", types.FIL(act.Balance))
fmt.Printf("Spendable: %s\n", types.FIL(types.BigSub(act.Balance, locked)))
fmt.Printf("Threshold: %d / %d\n", mstate.NumApprovalsThreshold, len(mstate.Signers))
fmt.Println("Signers:")
for _, s := range mstate.Signers {
@ -277,7 +282,7 @@ var msigProposeCmd = &cli.Command{
ArgsUsage: "[multisigAddress destinationAddress value <methodId methodParams> (optional)]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "source",
Name: "from",
Usage: "account to send the propose message from",
},
},
@ -329,8 +334,8 @@ var msigProposeCmd = &cli.Command{
}
var from address.Address
if cctx.IsSet("source") {
f, err := address.NewFromString(cctx.String("source"))
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
@ -376,7 +381,7 @@ var msigApproveCmd = &cli.Command{
ArgsUsage: "[multisigAddress messageId proposerAddress destination value <methodId methodParams> (optional)]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "source",
Name: "from",
Usage: "account to send the approve message from",
},
},
@ -445,8 +450,8 @@ var msigApproveCmd = &cli.Command{
}
var from address.Address
if cctx.IsSet("source") {
f, err := address.NewFromString(cctx.String("source"))
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
@ -478,3 +483,234 @@ var msigApproveCmd = &cli.Command{
return nil
},
}
var msigSwapProposeCmd = &cli.Command{
Name: "swap-propose",
Usage: "Propose to swap signers",
ArgsUsage: "[multisigAddress oldAddress newAddress]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "account to send the propose message from",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.Args().Len() != 3 {
return fmt.Errorf("must pass multisig address, old signer address, new signer address")
}
msig, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
oldAdd, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
newAdd, err := address.NewFromString(cctx.Args().Get(2))
if err != nil {
return err
}
var from address.Address
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
from = f
} else {
defaddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
from = defaddr
}
msgCid, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd)
if err != nil {
return err
}
fmt.Println("sent swap proposal in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil {
return err
}
if wait.Receipt.ExitCode != 0 {
return fmt.Errorf("swap proposal returned exit %d", wait.Receipt.ExitCode)
}
return nil
},
}
var msigSwapApproveCmd = &cli.Command{
Name: "swap-approve",
Usage: "Approve a message to swap signers",
ArgsUsage: "[multisigAddress proposerAddress txId oldAddress newAddress]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "account to send the approve message from",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.Args().Len() != 5 {
return fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address")
}
msig, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
prop, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
txid, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
if err != nil {
return err
}
oldAdd, err := address.NewFromString(cctx.Args().Get(3))
if err != nil {
return err
}
newAdd, err := address.NewFromString(cctx.Args().Get(4))
if err != nil {
return err
}
var from address.Address
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
from = f
} else {
defaddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
from = defaddr
}
msgCid, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd)
if err != nil {
return err
}
fmt.Println("sent swap approval in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil {
return err
}
if wait.Receipt.ExitCode != 0 {
return fmt.Errorf("swap approval returned exit %d", wait.Receipt.ExitCode)
}
return nil
},
}
var msigSwapCancelCmd = &cli.Command{
Name: "swap-cancel",
Usage: "Cancel a message to swap signers",
ArgsUsage: "[multisigAddress txId oldAddress newAddress]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
Usage: "account to send the cancel message from",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.Args().Len() != 4 {
return fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address")
}
msig, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
if err != nil {
return err
}
oldAdd, err := address.NewFromString(cctx.Args().Get(2))
if err != nil {
return err
}
newAdd, err := address.NewFromString(cctx.Args().Get(3))
if err != nil {
return err
}
var from address.Address
if cctx.IsSet("from") {
f, err := address.NewFromString(cctx.String("from"))
if err != nil {
return err
}
from = f
} else {
defaddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
from = defaddr
}
msgCid, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd)
if err != nil {
return err
}
fmt.Println("sent swap approval in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil {
return err
}
if wait.Receipt.ExitCode != 0 {
return fmt.Errorf("swap approval returned exit %d", wait.Receipt.ExitCode)
}
return nil
},
}
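The --from handling is duplicated across the msig propose, approve, swap-propose, swap-approve and swap-cancel commands. A hypothetical helper (illustrative only; lapi stands for the lotus api package) could centralise the fallback to the default wallet address:

// hypothetical helper, not part of this change
func msigFromAddress(ctx context.Context, cctx *cli.Context, api lapi.FullNode) (address.Address, error) {
	if cctx.IsSet("from") {
		return address.NewFromString(cctx.String("from"))
	}
	return api.WalletDefaultAddress(ctx)
}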

View File

@ -14,7 +14,7 @@ var sendCmd = &cli.Command{
ArgsUsage: "[targetAddress] [amount]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "source",
Name: "from",
Usage: "optionally specify the account to send funds from",
},
&cli.StringFlag{
@ -52,7 +52,7 @@ var sendCmd = &cli.Command{
}
var fromAddr address.Address
if from := cctx.String("source"); from == "" {
if from := cctx.String("from"); from == "" {
defaddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err

View File

@ -49,7 +49,7 @@ var stateCmd = &cli.Command{
Subcommands: []*cli.Command{
statePowerCmd,
stateSectorsCmd,
stateProvingSetCmd,
stateActiveSectorsCmd,
statePledgeCollateralCmd,
stateListActorsCmd,
stateListMinersCmd,
@ -241,9 +241,9 @@ var stateSectorsCmd = &cli.Command{
},
}
var stateProvingSetCmd = &cli.Command{
Name: "proving",
Usage: "Query the proving set of a miner",
var stateActiveSectorsCmd = &cli.Command{
Name: "active-sectors",
Usage: "Query the active sector set of a miner",
ArgsUsage: "[minerAddress]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
@ -268,7 +268,7 @@ var stateProvingSetCmd = &cli.Command{
return err
}
sectors, err := api.StateMinerProvingSet(ctx, maddr, ts.Key())
sectors, err := api.StateMinerActiveSectors(ctx, maddr, ts.Key())
if err != nil {
return err
}

View File

@ -1,12 +1,15 @@
package main
import (
"database/sql"
"fmt"
"hash/crc32"
"strconv"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
var dotCmd = &cli.Command{
@ -14,10 +17,24 @@ var dotCmd = &cli.Command{
Usage: "generate dot graphs",
ArgsUsage: "<minHeight> <toseeHeight>",
Action: func(cctx *cli.Context) error {
st, err := openStorage(cctx.String("db"))
ll := cctx.String("log-level")
if err := logging.SetLogLevel("*", ll); err != nil {
return err
}
db, err := sql.Open("postgres", cctx.String("db"))
if err != nil {
return err
}
defer func() {
if err := db.Close(); err != nil {
log.Errorw("Failed to close database", "error", err)
}
}()
if err := db.Ping(); err != nil {
return xerrors.Errorf("Database failed to respond to ping (is it online?): %w", err)
}
minH, err := strconv.ParseInt(cctx.Args().Get(0), 10, 32)
if err != nil {
@ -29,7 +46,7 @@ var dotCmd = &cli.Command{
}
maxH := minH + tosee
res, err := st.db.Query(`select block, parent, b.miner, b.height, p.height from block_parents
res, err := db.Query(`select block, parent, b.miner, b.height, p.height from block_parents
inner join blocks b on block_parents.block = b.cid
inner join blocks p on block_parents.parent = p.cid
where b.height > $1 and b.height < $2`, minH, maxH)
@ -40,7 +57,10 @@ where b.height > $1 and b.height < $2`, minH, maxH)
fmt.Println("digraph D {")
hl := st.hasList()
hl, err := syncedBlocks(db)
if err != nil {
log.Fatal(err)
}
for res.Next() {
var block, parent, miner string
@ -85,3 +105,27 @@ where b.height > $1 and b.height < $2`, minH, maxH)
return nil
},
}
func syncedBlocks(db *sql.DB) (map[cid.Cid]struct{}, error) {
// timestamp is used to return a configurable number of rows based on when they were last added.
rws, err := db.Query(`select cid FROM blocks_synced`)
if err != nil {
return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
}
out := map[cid.Cid]struct{}{}
for rws.Next() {
var c string
if err := rws.Scan(&c); err != nil {
return nil, xerrors.Errorf("Failed to scan blocks_synced: %w", err)
}
ci, err := cid.Parse(c)
if err != nil {
return nil, xerrors.Errorf("Failed to parse blocks_synced: %w", err)
}
out[ci] = struct{}{}
}
return out, nil
}

View File

@ -1,34 +1,22 @@
package main
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"github.com/filecoin-project/lotus/build"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
)
var log = logging.Logger("chainwatch")
func main() {
_ = logging.SetLogLevel("*", "INFO")
if err := logging.SetLogLevel("rpc", "error"); err != nil {
panic(err)
if err := logging.SetLogLevel("*", "info"); err != nil {
log.Fatal(err)
}
log.Info("Starting chainwatch")
local := []*cli.Command{
runCmd,
dotCmd,
}
app := &cli.App{
Name: "lotus-chainwatch",
Usage: "Devnet token distribution utility",
@ -44,69 +32,19 @@ func main() {
EnvVars: []string{"LOTUS_DB"},
Value: "",
},
&cli.StringFlag{
Name: "log-level",
EnvVars: []string{"GOLOG_LOG_LEVEL"},
Value: "info",
},
},
Commands: []*cli.Command{
dotCmd,
runCmd,
},
Commands: local,
}
if err := app.Run(os.Args); err != nil {
log.Warnf("%+v", err)
os.Exit(1)
log.Fatal(err)
}
}
var runCmd = &cli.Command{
Name: "run",
Usage: "Start lotus chainwatch",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "front",
Value: "127.0.0.1:8418",
},
&cli.IntFlag{
Name: "max-batch",
Value: 1000,
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
v, err := api.Version(ctx)
if err != nil {
return err
}
log.Infof("Remote version: %s", v.Version)
maxBatch := cctx.Int("max-batch")
st, err := openStorage(cctx.String("db"))
if err != nil {
return err
}
defer st.close() //nolint:errcheck
runSyncer(ctx, api, st, maxBatch)
h, err := newHandler(api, st)
if err != nil {
return xerrors.Errorf("handler setup: %w", err)
}
http.Handle("/", h)
fmt.Printf("Open http://%s\n", cctx.String("front"))
go func() {
<-ctx.Done()
os.Exit(0)
}()
return http.ListenAndServe(cctx.String("front"), nil)
},
}

View File

@ -1,60 +0,0 @@
package main
import (
"context"
"time"
"github.com/ipfs/go-cid"
aapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
func subMpool(ctx context.Context, api aapi.FullNode, st *storage) {
sub, err := api.MpoolSub(ctx)
if err != nil {
return
}
for {
var updates []aapi.MpoolUpdate
select {
case update := <-sub:
updates = append(updates, update)
case <-ctx.Done():
return
}
loop:
for {
time.Sleep(10 * time.Millisecond)
select {
case update := <-sub:
updates = append(updates, update)
default:
break loop
}
}
msgs := map[cid.Cid]*types.Message{}
for _, v := range updates {
if v.Type != aapi.MpoolAdd {
continue
}
msgs[v.Message.Message.Cid()] = &v.Message.Message
}
log.Debugf("Processing %d mpool updates", len(msgs))
err := st.storeMessages(msgs)
if err != nil {
log.Error(err)
}
if err := st.storeMpoolInclusions(updates); err != nil {
log.Error(err)
}
}
}

View File

@ -0,0 +1,299 @@
package processor
import (
"bytes"
"context"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/builtin"
_init "github.com/filecoin-project/specs-actors/actors/builtin/init"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/types"
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)
func (p *Processor) setupCommonActors() error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create table if not exists id_address_map
(
id text not null,
address text not null,
constraint id_address_map_pk
primary key (id, address)
);
create unique index if not exists id_address_map_id_uindex
on id_address_map (id);
create unique index if not exists id_address_map_address_uindex
on id_address_map (address);
create table if not exists actors
(
id text not null
constraint id_address_map_actors_id_fk
references id_address_map (id),
code text not null,
head text not null,
nonce int not null,
balance text not null,
stateroot text
);
create index if not exists actors_id_index
on actors (id);
create index if not exists id_address_map_address_index
on id_address_map (address);
create index if not exists id_address_map_id_index
on id_address_map (id);
create or replace function actor_tips(epoch bigint)
returns table (id text,
code text,
head text,
nonce int,
balance text,
stateroot text,
height bigint,
parentstateroot text) as
$body$
select distinct on (id) * from actors
inner join state_heights sh on sh.parentstateroot = stateroot
where height < $1
order by id, height desc;
$body$ language sql;
create table if not exists actor_states
(
head text not null,
code text not null,
state json not null
);
create unique index if not exists actor_states_head_code_uindex
on actor_states (head, code);
create index if not exists actor_states_head_index
on actor_states (head);
create index if not exists actor_states_code_head_index
on actor_states (head, code);
`); err != nil {
return err
}
return tx.Commit()
}
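The actor_tips(epoch) function created above returns, for every actor id, its most recent row below the given height. A hedged usage fragment (p.db and epoch are assumed from the surrounding code; the query simply selects the columns declared in the function):

rows, err := p.db.Query(`select id, code, head, nonce, balance, stateroot, height, parentstateroot from actor_tips($1)`, epoch)
if err != nil {
	return err
}
defer rows.Close()
for rows.Next() {
	var id, code, head, balance, stateroot, parentStateRoot string
	var nonce int
	var height int64
	if err := rows.Scan(&id, &code, &head, &nonce, &balance, &stateroot, &height, &parentStateRoot); err != nil {
		return err
	}
	// ... use the latest-known actor state here ...
}
return rows.Err()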
func (p *Processor) HandleCommonActorsChanges(ctx context.Context, actors map[cid.Cid]ActorTips) error {
if err := p.storeActorAddresses(ctx, actors); err != nil {
return err
}
grp, _ := errgroup.WithContext(ctx)
grp.Go(func() error {
if err := p.storeActorHeads(actors); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeActorStates(actors); err != nil {
return err
}
return nil
})
return grp.Wait()
}
func (p Processor) storeActorAddresses(ctx context.Context, actors map[cid.Cid]ActorTips) error {
start := time.Now()
defer func() {
log.Debugw("Stored Actor Addresses", "duration", time.Since(start).String())
}()
addressToID := map[address.Address]address.Address{}
// HACK until genesis storage is figured out:
addressToID[builtin.SystemActorAddr] = builtin.SystemActorAddr
addressToID[builtin.InitActorAddr] = builtin.InitActorAddr
addressToID[builtin.RewardActorAddr] = builtin.RewardActorAddr
addressToID[builtin.CronActorAddr] = builtin.CronActorAddr
addressToID[builtin.StoragePowerActorAddr] = builtin.StoragePowerActorAddr
addressToID[builtin.StorageMarketActorAddr] = builtin.StorageMarketActorAddr
addressToID[builtin.VerifiedRegistryActorAddr] = builtin.VerifiedRegistryActorAddr
addressToID[builtin.BurntFundsActorAddr] = builtin.BurntFundsActorAddr
initActor, err := p.node.StateGetActor(ctx, builtin.InitActorAddr, types.EmptyTSK)
if err != nil {
return err
}
initActorRaw, err := p.node.ChainReadObj(ctx, initActor.Head)
if err != nil {
return err
}
var initActorState _init.State
if err := initActorState.UnmarshalCBOR(bytes.NewReader(initActorRaw)); err != nil {
return err
}
ctxStore := cw_util.NewAPIIpldStore(ctx, p.node)
addrMap, err := adt.AsMap(ctxStore, initActorState.AddressMap)
if err != nil {
return err
}
// gross..
var actorID typegen.CborInt
if err := addrMap.ForEach(&actorID, func(key string) error {
longAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
shortAddr, err := address.NewIDAddress(uint64(actorID))
if err != nil {
return err
}
addressToID[longAddr] = shortAddr
return nil
}); err != nil {
return err
}
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table iam (like id_address_map excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy iam (id, address) from STDIN `)
if err != nil {
return err
}
for a, i := range addressToID {
if i == address.Undef {
continue
}
if _, err := stmt.Exec(
i.String(),
a.String(),
); err != nil {
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into id_address_map select * from iam on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
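storeActorAddresses (and the other store* helpers in this package) all follow the same bulk-upsert shape: stage rows into a temp table with COPY, then insert from it with ON CONFLICT DO NOTHING. A generic sketch of that pattern, assuming the lib/pq driver (whose Prepare/Exec accept COPY ... FROM STDIN statements):

// hypothetical helper illustrating the staging pattern; not part of this change
func bulkUpsert(db *sql.DB, table, columns string, rows [][]interface{}) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// the temp table mirrors the target and is dropped automatically on commit
	if _, err := tx.Exec(`create temp table staging (like ` + table + ` excluding constraints) on commit drop;`); err != nil {
		return err
	}
	stmt, err := tx.Prepare(`copy staging (` + columns + `) from STDIN`)
	if err != nil {
		return err
	}
	for _, r := range rows {
		if _, err := stmt.Exec(r...); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	// duplicates are silently skipped, so reprocessing a block stays idempotent
	if _, err := tx.Exec(`insert into ` + table + ` select * from staging on conflict do nothing`); err != nil {
		return err
	}
	return tx.Commit()
}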
func (p *Processor) storeActorHeads(actors map[cid.Cid]ActorTips) error {
start := time.Now()
defer func() {
log.Debugw("Stored Actor Heads", "duration", time.Since(start).String())
}()
// Basic
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table a (like actors excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy a (id, code, head, nonce, balance, stateroot) from stdin `)
if err != nil {
return err
}
for code, actTips := range actors {
for _, actorInfo := range actTips {
for _, a := range actorInfo {
if _, err := stmt.Exec(a.addr.String(), code.String(), a.act.Head.String(), a.act.Nonce, a.act.Balance.String(), a.stateroot.String()); err != nil {
return err
}
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into actors select * from a on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeActorStates(actors map[cid.Cid]ActorTips) error {
start := time.Now()
defer func() {
log.Debugw("Stored Actor States", "duration", time.Since(start).String())
}()
// States
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table a (like actor_states excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy a (head, code, state) from stdin `)
if err != nil {
return err
}
for code, actTips := range actors {
for _, actorInfo := range actTips {
for _, a := range actorInfo {
if _, err := stmt.Exec(a.act.Head.String(), code.String(), a.state); err != nil {
return err
}
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into actor_states select * from a on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}

View File

@ -0,0 +1,301 @@
package processor
import (
"context"
"strconv"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/events/state"
)
func (p *Processor) setupMarket() error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create table if not exists market_deal_proposals
(
deal_id bigint not null,
state_root text not null,
piece_cid text not null,
padded_piece_size bigint not null,
unpadded_piece_size bigint not null,
is_verified bool not null,
client_id text not null,
provider_id text not null,
start_epoch bigint not null,
end_epoch bigint not null,
slashed_epoch bigint,
storage_price_per_epoch text not null,
provider_collateral text not null,
client_collateral text not null,
constraint market_deal_proposal_pk
primary key (deal_id)
);
create table if not exists market_deal_states
(
deal_id bigint not null,
sector_start_epoch bigint not null,
last_update_epoch bigint not null,
slash_epoch bigint not null,
state_root text not null,
unique (deal_id, sector_start_epoch, last_update_epoch, slash_epoch),
constraint market_deal_states_pk
primary key (deal_id, state_root)
);
`); err != nil {
return err
}
return tx.Commit()
}
type marketActorInfo struct {
common actorInfo
}
func (p *Processor) HandleMarketChanges(ctx context.Context, marketTips ActorTips) error {
marketChanges, err := p.processMarket(ctx, marketTips)
if err != nil {
log.Fatalw("Failed to process market actors", "error", err)
}
if err := p.persistMarket(ctx, marketChanges); err != nil {
log.Fatalw("Failed to persist market actors", "error", err)
}
if err := p.updateMarket(ctx, marketChanges); err != nil {
log.Fatalw("Failed to update market actors", "error", err)
}
return nil
}
func (p *Processor) processMarket(ctx context.Context, marketTips ActorTips) ([]marketActorInfo, error) {
start := time.Now()
defer func() {
log.Debugw("Processed Market", "duration", time.Since(start).String())
}()
var out []marketActorInfo
for _, markets := range marketTips {
for _, mt := range markets {
// NB: here is where we can extract the market state when we need it.
out = append(out, marketActorInfo{common: mt})
}
}
return out, nil
}
func (p *Processor) persistMarket(ctx context.Context, info []marketActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Market", "duration", time.Since(start).String())
}()
grp, ctx := errgroup.WithContext(ctx)
grp.Go(func() error {
if err := p.storeMarketActorDealProposals(ctx, info); err != nil {
return xerrors.Errorf("Failed to store marker deal proposals: %w", err)
}
return nil
})
grp.Go(func() error {
if err := p.storeMarketActorDealStates(info); err != nil {
return xerrors.Errorf("Failed to store marker deal states: %w", err)
}
return nil
})
return grp.Wait()
}
func (p *Processor) updateMarket(ctx context.Context, info []marketActorInfo) error {
if err := p.updateMarketActorDealProposals(ctx, info); err != nil {
return xerrors.Errorf("Failed to update market info: %w", err)
}
return nil
}
func (p *Processor) storeMarketActorDealStates(marketTips []marketActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Market Deal States", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`create temp table mds (like market_deal_states excluding constraints) on commit drop;`); err != nil {
return err
}
stmt, err := tx.Prepare(`copy mds (deal_id, sector_start_epoch, last_update_epoch, slash_epoch, state_root) from STDIN`)
if err != nil {
return err
}
for _, mt := range marketTips {
dealStates, err := p.node.StateMarketDeals(context.TODO(), mt.common.tsKey)
if err != nil {
return err
}
for dealID, ds := range dealStates {
id, err := strconv.ParseUint(dealID, 10, 64)
if err != nil {
return err
}
if _, err := stmt.Exec(
id,
ds.State.SectorStartEpoch,
ds.State.LastUpdatedEpoch,
ds.State.SlashEpoch,
mt.common.stateroot.String(),
); err != nil {
return err
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into market_deal_states select * from mds on conflict do nothing`); err != nil {
return err
}
return tx.Commit()
}
func (p *Processor) storeMarketActorDealProposals(ctx context.Context, marketTips []marketActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Market Deal Proposals", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`create temp table mdp (like market_deal_proposals excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy mdp (deal_id, state_root, piece_cid, padded_piece_size, unpadded_piece_size, is_verified, client_id, provider_id, start_epoch, end_epoch, slashed_epoch, storage_price_per_epoch, provider_collateral, client_collateral) from STDIN`)
if err != nil {
return err
}
// insert in sorted order (lowest height -> highest height) since deal_id is the primary key of the table.
for _, mt := range marketTips {
dealStates, err := p.node.StateMarketDeals(ctx, mt.common.tsKey)
if err != nil {
return err
}
for dealID, ds := range dealStates {
id, err := strconv.ParseUint(dealID, 10, 64)
if err != nil {
return err
}
if _, err := stmt.Exec(
id,
mt.common.stateroot.String(),
ds.Proposal.PieceCID.String(),
ds.Proposal.PieceSize,
ds.Proposal.PieceSize.Unpadded(),
ds.Proposal.VerifiedDeal,
ds.Proposal.Client.String(),
ds.Proposal.Provider.String(),
ds.Proposal.StartEpoch,
ds.Proposal.EndEpoch,
nil, // slashed_epoch
ds.Proposal.StoragePricePerEpoch.String(),
ds.Proposal.ProviderCollateral.String(),
ds.Proposal.ClientCollateral.String(),
); err != nil {
return err
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into market_deal_proposals select * from mdp on conflict do nothing`); err != nil {
return err
}
return tx.Commit()
}
func (p *Processor) updateMarketActorDealProposals(ctx context.Context, marketTip []marketActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Updated Market Deal Proposals", "duration", time.Since(start).String())
}()
pred := state.NewStatePredicates(p.node)
tx, err := p.db.Begin()
if err != nil {
return err
}
stmt, err := tx.Prepare(`update market_deal_proposals set slashed_epoch=$1 where deal_id=$2`)
if err != nil {
return err
}
for _, mt := range marketTip {
stateDiff := pred.OnStorageMarketActorChanged(pred.OnDealStateChanged(pred.OnDealStateAmtChanged()))
changed, val, err := stateDiff(ctx, mt.common.parentTsKey, mt.common.tsKey)
if err != nil {
log.Warnw("error getting market deal state diff", "error", err)
}
if !changed {
continue
}
changes, ok := val.(*state.MarketDealStateChanges)
if !ok {
return xerrors.Errorf("Unknown type returned by Deal State AMT predicate: %T", val)
}
for _, modified := range changes.Modified {
if modified.From.SlashEpoch != modified.To.SlashEpoch {
if _, err := stmt.Exec(modified.To.SlashEpoch, modified.ID); err != nil {
return err
}
}
}
}
if err := stmt.Close(); err != nil {
return err
}
return tx.Commit()
}

View File

@ -0,0 +1,316 @@
package processor
import (
"context"
"sync"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/parmap"
)
func (p *Processor) setupMessages() error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create table if not exists messages
(
cid text not null
constraint messages_pk
primary key,
"from" text not null,
"to" text not null,
nonce bigint not null,
value text not null,
gasprice bigint not null,
gaslimit bigint not null,
method bigint,
params bytea
);
create unique index if not exists messages_cid_uindex
on messages (cid);
create index if not exists messages_from_index
on messages ("from");
create index if not exists messages_to_index
on messages ("to");
create table if not exists block_messages
(
block text not null
constraint blocks_block_cids_cid_fk
references block_cids (cid),
message text not null,
constraint block_messages_pk
primary key (block, message)
);
create table if not exists mpool_messages
(
msg text not null
constraint mpool_messages_pk
primary key
constraint mpool_messages_messages_cid_fk
references messages,
add_ts int not null
);
create unique index if not exists mpool_messages_msg_uindex
on mpool_messages (msg);
create table if not exists receipts
(
msg text not null,
state text not null,
idx int not null,
exit int not null,
gas_used int not null,
return bytea,
constraint receipts_pk
primary key (msg, state)
);
create index if not exists receipts_msg_state_index
on receipts (msg, state);
`); err != nil {
return err
}
return tx.Commit()
}
func (p *Processor) HandleMessageChanges(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) error {
if err := p.persistMessagesAndReceipts(ctx, blocks); err != nil {
return err
}
return nil
}
func (p *Processor) persistMessagesAndReceipts(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) error {
messages, inclusions := p.fetchMessages(ctx, blocks)
receipts := p.fetchParentReceipts(ctx, blocks)
grp, _ := errgroup.WithContext(ctx)
grp.Go(func() error {
return p.storeMessages(messages)
})
grp.Go(func() error {
return p.storeMsgInclusions(inclusions)
})
grp.Go(func() error {
return p.storeReceipts(receipts)
})
return grp.Wait()
}
func (p *Processor) storeReceipts(recs map[mrec]*types.MessageReceipt) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Receipts", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table recs (like receipts excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy recs (msg, state, idx, exit, gas_used, return) from stdin `)
if err != nil {
return err
}
for c, m := range recs {
if _, err := stmt.Exec(
c.msg.String(),
c.state.String(),
c.idx,
m.ExitCode,
m.GasUsed,
m.Return,
); err != nil {
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into receipts select * from recs on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeMsgInclusions(incls map[cid.Cid][]cid.Cid) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Message Inclusions", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table mi (like block_messages excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy mi (block, message) from STDIN `)
if err != nil {
return err
}
for b, msgs := range incls {
for _, msg := range msgs {
if _, err := stmt.Exec(
b.String(),
msg.String(),
); err != nil {
return err
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into block_messages select * from mi on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeMessages(msgs map[cid.Cid]*types.Message) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Messages", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table msgs (like messages excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy msgs (cid, "from", "to", nonce, "value", gasprice, gaslimit, method, params) from stdin `)
if err != nil {
return err
}
for c, m := range msgs {
if _, err := stmt.Exec(
c.String(),
m.From.String(),
m.To.String(),
m.Nonce,
m.Value.String(),
m.GasPrice.String(),
m.GasLimit,
m.Method,
m.Params,
); err != nil {
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into messages select * from msgs on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) fetchMessages(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) (map[cid.Cid]*types.Message, map[cid.Cid][]cid.Cid) {
var lk sync.Mutex
messages := map[cid.Cid]*types.Message{}
inclusions := map[cid.Cid][]cid.Cid{} // block -> msgs
parmap.Par(50, parmap.MapArr(blocks), func(header *types.BlockHeader) {
msgs, err := p.node.ChainGetBlockMessages(ctx, header.Cid())
if err != nil {
panic(err)
}
vmm := make([]*types.Message, 0, len(msgs.Cids))
for _, m := range msgs.BlsMessages {
vmm = append(vmm, m)
}
for _, m := range msgs.SecpkMessages {
vmm = append(vmm, &m.Message)
}
lk.Lock()
for _, message := range vmm {
messages[message.Cid()] = message
inclusions[header.Cid()] = append(inclusions[header.Cid()], message.Cid())
}
lk.Unlock()
})
return messages, inclusions
}
type mrec struct {
msg cid.Cid
state cid.Cid
idx int
}
func (p *Processor) fetchParentReceipts(ctx context.Context, toSync map[cid.Cid]*types.BlockHeader) map[mrec]*types.MessageReceipt {
var lk sync.Mutex
out := map[mrec]*types.MessageReceipt{}
parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
recs, err := p.node.ChainGetParentReceipts(ctx, header.Cid())
if err != nil {
panic(err)
}
msgs, err := p.node.ChainGetParentMessages(ctx, header.Cid())
if err != nil {
panic(err)
}
lk.Lock()
for i, r := range recs {
out[mrec{
msg: msgs[i].Cid,
state: header.ParentStateRoot,
idx: i,
}] = r
}
lk.Unlock()
})
return out
}
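fetchMessages and fetchParentReceipts fan out over the block headers with parmap.Par(50, ...) and guard the shared maps with a mutex. An equivalent bounded fan-out written with plain goroutines (illustrative only, not the parmap API; headers and node are assumed names):

sem := make(chan struct{}, 50) // at most 50 concurrent node calls
var wg sync.WaitGroup
var lk sync.Mutex
for _, header := range headers {
	header := header
	wg.Add(1)
	sem <- struct{}{}
	go func() {
		defer wg.Done()
		defer func() { <-sem }()
		msgs, err := node.ChainGetBlockMessages(ctx, header.Cid())
		if err != nil {
			log.Error(err) // the code above panics here instead
			return
		}
		lk.Lock()
		_ = msgs // ... record messages/inclusions here ...
		lk.Unlock()
	}()
}
wg.Wait()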

View File

@ -0,0 +1,800 @@
package processor
import (
"bytes"
"context"
"fmt"
"strings"
//"strings"
"sync"
"time"
"github.com/filecoin-project/go-address"
//"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/specs-actors/actors/abi"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
//"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)
func (p *Processor) setupMiners() error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create table if not exists miner_info
(
miner_id text not null,
owner_addr text not null,
worker_addr text not null,
peer_id text,
sector_size text not null,
precommit_deposits text not null,
locked_funds text not null,
next_deadline_process_faults bigint not null,
constraint miner_info_pk
primary key (miner_id)
);
/*
* captures miner-specific power state for any given stateroot
*/
create table if not exists miner_power
(
miner_id text not null,
state_root text not null,
raw_bytes_power text not null,
quality_adjusted_power text not null,
constraint miner_power_pk
primary key (miner_id, state_root)
);
create table if not exists miner_precommits
(
miner_id text not null,
sector_id bigint not null,
precommit_deposit text not null,
precommit_epoch text not null,
constraint miner_precommits_pk
primary key (miner_id, sector_id)
);
create table if not exists miner_sectors
(
miner_id text not null,
sector_id bigint not null,
activation_epoch bigint not null,
expiration_epoch bigint not null,
termination_epoch bigint,
deal_weight text not null,
verified_deal_weight text not null,
seal_cid text not null,
seal_rand_epoch bigint not null,
constraint miner_sectors_pk
primary key (miner_id, sector_id)
);
/* used to tell when a miner's sectors (proven-not-yet-expired) changed: if the miner_sectors_cids differ, a sector was added or removed (terminated/expired) */
create table if not exists miner_sectors_heads
(
miner_id text not null,
miner_sectors_cid text not null,
state_root text not null,
constraint miner_sectors_heads_pk
primary key (miner_id,miner_sectors_cid)
);
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'miner_sector_event_type') THEN
CREATE TYPE miner_sector_event_type AS ENUM
(
'PRECOMMIT', 'COMMIT', 'EXTENDED', 'EXPIRED', 'TERMINATED'
);
END IF;
END$$;
create table if not exists miner_sector_events
(
miner_id text not null,
sector_id bigint not null,
state_root text not null,
event miner_sector_event_type not null,
constraint miner_sector_events_pk
primary key (sector_id, event, miner_id, state_root)
);
create materialized view if not exists miner_sectors_view as
select ms.miner_id, ms.sector_id, mp.precommit_epoch, ms.activation_epoch, ms.expiration_epoch, ms.termination_epoch, ms.deal_weight, ms.verified_deal_weight
from miner_sectors ms
left join miner_precommits mp on ms.sector_id = mp.sector_id and ms.miner_id = mp.miner_id
`); err != nil {
return err
}
return tx.Commit()
}
type minerActorInfo struct {
common actorInfo
state miner.State
// tracked by power actor
rawPower big.Int
qalPower big.Int
}
type sectorUpdate struct {
terminationEpoch abi.ChainEpoch
terminated bool
expirationEpoch abi.ChainEpoch
sectorID abi.SectorNumber
minerID address.Address
}
func (p *Processor) HandleMinerChanges(ctx context.Context, minerTips ActorTips) error {
minerChanges, err := p.processMiners(ctx, minerTips)
if err != nil {
log.Fatalw("Failed to process miner actors", "error", err)
}
if err := p.persistMiners(ctx, minerChanges); err != nil {
log.Fatalw("Failed to persist miner actors", "error", err)
}
if err := p.updateMiners(ctx, minerChanges); err != nil {
log.Fatalw("Failed to update miner actors", "error", err)
}
return nil
}
func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSetKey][]actorInfo) ([]minerActorInfo, error) {
start := time.Now()
defer func() {
log.Debugw("Processed Miners", "duration", time.Since(start).String())
}()
var out []minerActorInfo
// TODO add parallel calls if this becomes slow
for tipset, miners := range minerTips {
// get the power actors claims map
minersClaims, err := getPowerActorClaimsMap(ctx, p.node, tipset)
if err != nil {
return nil, err
}
// Get miner raw and quality power
for _, act := range miners {
var mi minerActorInfo
mi.common = act
var claim power.Claim
// get the miner's claim from the power actor's claims map and store it if found; otherwise the miner
// had no claim at this tipset
found, err := minersClaims.Get(adt.AddrKey(act.addr), &claim)
if err != nil {
return nil, err
}
if found {
mi.qalPower = claim.QualityAdjPower
mi.rawPower = claim.RawBytePower
}
// Get the miner state info
astb, err := p.node.ChainReadObj(ctx, act.act.Head)
if err != nil {
log.Warnw("failed to find miner actor state", "address", act.addr, "error", err)
continue
}
if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
return nil, err
}
out = append(out, mi)
}
}
return out, nil
}
func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Miners", "duration", time.Since(start).String())
}()
grp, _ := errgroup.WithContext(ctx)
grp.Go(func() error {
if err := p.storeMinersActorState(miners); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeMinersPower(miners); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeMinersSectorState(ctx, miners); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeMinersSectorHeads(miners); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeMinersPreCommitState(ctx, miners); err != nil {
return err
}
return nil
})
return grp.Wait()
}
func (p *Processor) storeMinersActorState(miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Miners Actor State", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`create temp table mi (like miner_info excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy mi (miner_id, owner_addr, worker_addr, peer_id, sector_size, precommit_deposits, locked_funds, next_deadline_process_faults) from STDIN`)
if err != nil {
return err
}
// TODO: Consume new Actor API
//for _, m := range miners {
//var pid string
//if len(m.state.Info.PeerId) != 0 {
//peerid, err := peer.IDFromBytes(m.state.Info.PeerId)
//if err != nil {
//// this should "never happen", but if it does we should still store info about the miner.
//log.Warnw("failed to decode peerID", "peerID (bytes)", m.state.Info.PeerId, "miner", m.common.addr, "tipset", m.common.tsKey.String())
//} else {
//pid = peerid.String()
//}
//}
//if _, err := stmt.Exec(
//m.common.addr.String(),
//m.state.Info.Owner.String(),
//m.state.Info.Worker.String(),
//pid,
//m.state.Info.SectorSize.ShortString(),
//m.state.PreCommitDeposits.String(),
//m.state.LockedFunds.String(),
//m.state.NextDeadlineToProcessFaults,
//); err != nil {
//log.Errorw("failed to store miner state", "state", m.state, "info", m.state.Info, "error", err)
//return xerrors.Errorf("failed to store miner state: %w", err)
//}
//}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into miner_info select * from mi on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Miners Power", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin miner_power tx: %w", err)
}
if _, err := tx.Exec(`create temp table mp (like miner_power excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep miner_power temp: %w", err)
}
stmt, err := tx.Prepare(`copy mp (miner_id, state_root, raw_bytes_power, quality_adjusted_power) from STDIN`)
if err != nil {
return xerrors.Errorf("prepare tmp miner_power: %w", err)
}
for _, m := range miners {
if _, err := stmt.Exec(
m.common.addr.String(),
m.common.stateroot.String(),
m.rawPower.String(),
m.qalPower.String(),
); err != nil {
log.Errorw("failed to store miner power", "miner", m.common.addr, "stateroot", m.common.stateroot, "error", err)
}
}
if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared miner_power: %w", err)
}
if _, err := tx.Exec(`insert into miner_power select * from mp on conflict do nothing`); err != nil {
return xerrors.Errorf("insert miner_power from tmp: %w", err)
}
if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit miner_power tx: %w", err)
}
return nil
}
func (p *Processor) storeMinersSectorState(ctx context.Context, miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Miners Sector State", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`create temp table ms (like miner_sectors excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy ms (miner_id, sector_id, activation_epoch, expiration_epoch, deal_weight, verified_deal_weight, seal_cid, seal_rand_epoch) from STDIN`)
if err != nil {
return err
}
grp, ctx := errgroup.WithContext(ctx)
for _, m := range miners {
m := m
grp.Go(func() error {
sectors, err := p.node.StateMinerSectors(ctx, m.common.addr, nil, true, m.common.tsKey)
if err != nil {
log.Debugw("Failed to load sectors", "tipset", m.common.tsKey.String(), "miner", m.common.addr.String(), "error", err)
}
for _, sector := range sectors {
if _, err := stmt.Exec(
m.common.addr.String(),
uint64(sector.ID),
// TODO: Consume new Actor API
//int64(sector.Info.ActivationEpoch),
0,
//int64(sector.Info.Info.Expiration),
0,
sector.Info.DealWeight.String(),
sector.Info.VerifiedDealWeight.String(),
//sector.Info.Info.SealedCID.String(),
"",
//int64(sector.Info.Info.SealRandEpoch),
0,
); err != nil {
return err
}
}
return nil
})
}
if err := grp.Wait(); err != nil {
return err
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into miner_sectors select * from ms on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeMinersSectorHeads(miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Stored Miners Sector Heads", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`create temp table msh (like miner_sectors_heads excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy msh (miner_id, miner_sectors_cid, state_root) from STDIN`)
if err != nil {
return err
}
for _, m := range miners {
if _, err := stmt.Exec(
m.common.addr.String(),
m.state.Sectors.String(),
m.common.stateroot.String(),
); err != nil {
log.Errorw("failed to store miners sectors head", "state", m.state, "info", m.state.Info, "error", err)
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into miner_sectors_heads select * from msh on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}
func (p *Processor) storeMinersPreCommitState(ctx context.Context, miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Infow("Stored Miners Precommit State", "duration", time.Since(start).String())
}()
precommitTx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := precommitTx.Exec(`create temp table mp (like miner_precommits excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
precommitStmt, err := precommitTx.Prepare(`copy mp (miner_id, sector_id, precommit_deposit, precommit_epoch) from STDIN`)
if err != nil {
return err
}
for _, m := range miners {
m := m
pcMap, err := adt.AsMap(cw_util.NewAPIIpldStore(ctx, p.node), m.state.PreCommittedSectors)
if err != nil {
return err
}
precommit := new(miner.SectorPreCommitOnChainInfo)
if err := pcMap.ForEach(precommit, func(key string) error {
if _, err := precommitStmt.Exec(
m.common.addr.String(),
precommit.Info.SectorNumber,
precommit.PreCommitDeposit.String(),
precommit.PreCommitEpoch,
); err != nil {
return err
}
return nil
}); err != nil {
return err
}
}
if err := precommitStmt.Close(); err != nil {
return err
}
if _, err := precommitTx.Exec(`insert into miner_precommits select * from mp on conflict do nothing`); err != nil {
return err
}
return precommitTx.Commit()
}
func (p *Processor) updateMiners(ctx context.Context, miners []minerActorInfo) error {
// TODO when/if there is more than one update operation here use an errgroup as is done in persistMiners
if err := p.updateMinersSectors(ctx, miners); err != nil {
return err
}
if err := p.updateMinersPrecommits(ctx, miners); err != nil {
return err
}
return nil
}
func (p *Processor) updateMinersPrecommits(ctx context.Context, miners []minerActorInfo) error {
start := time.Now()
defer func() {
log.Infow("Updated Miner Precommits", "duration", time.Since(start).String())
}()
pred := state.NewStatePredicates(p.node)
eventTx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := eventTx.Exec(`create temp table mse (like miner_sector_events excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
eventStmt, err := eventTx.Prepare(`copy mse (sector_id, event, miner_id, state_root) from STDIN `)
if err != nil {
return err
}
for _, m := range miners {
pcDiffFn := pred.OnMinerActorChange(m.common.addr, pred.OnMinerPreCommitChange())
changed, val, err := pcDiffFn(ctx, m.common.parentTsKey, m.common.tsKey)
if err != nil {
if strings.Contains(err.Error(), "address not found") {
continue
}
log.Errorw("error getting miner precommit diff", "miner", m.common.addr, "error", err)
return err
}
if !changed {
continue
}
changes, ok := val.(*state.MinerPreCommitChanges)
if !ok {
log.Fatal("Developer Error")
}
for _, added := range changes.Added {
if _, err := eventStmt.Exec(added.Info.SectorNumber, "PRECOMMIT", m.common.addr.String(), m.common.stateroot.String()); err != nil {
return err
}
}
}
if err := eventStmt.Close(); err != nil {
return err
}
if _, err := eventTx.Exec(`insert into miner_sector_events select * from mse on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return eventTx.Commit()
}
func (p *Processor) updateMinersSectors(ctx context.Context, miners []minerActorInfo) error {
log.Debugw("Updating Miners Sectors", "#miners", len(miners))
start := time.Now()
defer func() {
log.Debugw("Updated Miners Sectors", "duration", time.Since(start).String())
}()
//pred := state.NewStatePredicates(p.node)
eventTx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := eventTx.Exec(`create temp table mse (like miner_sector_events excluding constraints) on commit drop;`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
eventStmt, err := eventTx.Prepare(`copy mse (sector_id, event, miner_id, state_root) from STDIN `)
if err != nil {
return err
}
var updateWg sync.WaitGroup
updateWg.Add(1)
sectorUpdatesCh := make(chan sectorUpdate)
var sectorUpdates []sectorUpdate
go func() {
for u := range sectorUpdatesCh {
sectorUpdates = append(sectorUpdates, u)
}
updateWg.Done()
}()
// TODO: Resolve Actor interface shift
//minerGrp, ctx := errgroup.WithContext(ctx)
//complete := 0
//for _, m := range miners {
// m := m
// if m.common.tsKey == p.genesisTs.Key() {
// genSectors, err := p.node.StateMinerSectors(ctx, m.common.addr, nil, true, p.genesisTs.Key())
// if err != nil {
// return err
// }
// for _, sector := range genSectors {
// if _, err := eventStmt.Exec(sector.ID, "COMMIT", m.common.addr.String(), m.common.stateroot.String()); err != nil {
// return err
// }
// }
// complete++
// continue
// }
// minerGrp.Go(func() error {
// // special case genesis miners
// sectorDiffFn := pred.OnMinerActorChange(m.common.addr, pred.OnMinerSectorChange())
// changed, val, err := sectorDiffFn(ctx, m.common.parentTsKey, m.common.tsKey)
// if err != nil {
// if strings.Contains(err.Error(), "address not found") {
// return nil
// }
// log.Errorw("error getting miner sector diff", "miner", m.common.addr, "error", err)
// return err
// }
// if !changed {
// complete++
// return nil
// }
// changes, ok := val.(*state.MinerSectorChanges)
// if !ok {
// log.Fatalw("Developer Error")
// }
// log.Debugw("sector changes for miner", "miner", m.common.addr.String(), "Added", len(changes.Added), "Extended", len(changes.Extended), "Removed", len(changes.Removed), "oldState", m.common.parentTsKey, "newState", m.common.tsKey)
//for _, extended := range changes.Extended {
//if _, err := eventStmt.Exec(extended.To.Info.SectorNumber, "EXTENDED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
//return err
//}
//sectorUpdatesCh <- sectorUpdate{
//terminationEpoch: 0,
//terminated: false,
//expirationEpoch: extended.To.Info.Expiration,
//sectorID: extended.From.Info.SectorNumber,
//minerID: m.common.addr,
//}
//log.Debugw("sector extended", "miner", m.common.addr.String(), "sector", extended.To.Info.SectorNumber, "old", extended.To.Info.Expiration, "new", extended.From.Info.Expiration)
//}
//curTs, err := p.node.ChainGetTipSet(ctx, m.common.tsKey)
//if err != nil {
//return err
//}
//for _, removed := range changes.Removed {
//log.Debugw("removed", "miner", m.common.addr)
//// decide if they were terminated or extended
//if removed.Info.Expiration > curTs.Height() {
//if _, err := eventStmt.Exec(removed.Info.SectorNumber, "TERMINATED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
//return err
//}
//log.Debugw("sector terminated", "miner", m.common.addr.String(), "sector", removed.Info.SectorNumber, "old", "sectorExpiration", removed.Info.Expiration, "terminationEpoch", curTs.Height())
//sectorUpdatesCh <- sectorUpdate{
//terminationEpoch: curTs.Height(),
//terminated: true,
//expirationEpoch: removed.Info.Expiration,
//sectorID: removed.Info.SectorNumber,
//minerID: m.common.addr,
//}
//}
//if _, err := eventStmt.Exec(removed.Info.SectorNumber, "EXPIRED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
//return err
//}
//log.Debugw("sector removed", "miner", m.common.addr.String(), "sector", removed.Info.SectorNumber, "old", "sectorExpiration", removed.Info.Expiration, "currEpoch", curTs.Height())
//}
// for _, added := range changes.Added {
// if _, err := eventStmt.Exec(added.Info.SectorNumber, "COMMIT", m.common.addr.String(), m.common.stateroot.String()); err != nil {
// return err
// }
// }
// complete++
// log.Debugw("Update Done", "complete", complete, "added", len(changes.Added), "removed", len(changes.Removed), "modified", len(changes.Extended))
// return nil
// })
//}
//if err := minerGrp.Wait(); err != nil {
// return err
//}
close(sectorUpdatesCh)
// wait for the update channel to be drained
updateWg.Wait()
if err := eventStmt.Close(); err != nil {
return err
}
if _, err := eventTx.Exec(`insert into miner_sector_events select * from mse on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
if err := eventTx.Commit(); err != nil {
return err
}
updateTx, err := p.db.Begin()
if err != nil {
return err
}
updateStmt, err := updateTx.Prepare(`UPDATE miner_sectors SET termination_epoch=$1, expiration_epoch=$2 WHERE miner_id=$3 AND sector_id=$4`)
if err != nil {
return err
}
for _, update := range sectorUpdates {
if update.terminated {
if _, err := updateStmt.Exec(update.terminationEpoch, update.expirationEpoch, update.minerID.String(), update.sectorID); err != nil {
return err
}
} else {
if _, err := updateStmt.Exec(nil, update.expirationEpoch, update.minerID.String(), update.sectorID); err != nil {
return err
}
}
}
if err := updateStmt.Close(); err != nil {
return err
}
return updateTx.Commit()
}
// load the power actor state claims as an adt.Map at the tipset `ts`.
func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
if err != nil {
return nil, err
}
powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
if err != nil {
return nil, err
}
var powerActorState power.State
if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
}
s := cw_util.NewAPIIpldStore(ctx, api)
return adt.AsMap(s, powerActorState.Claims)
}

View File

@ -0,0 +1,103 @@
package processor
import (
"context"
"time"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
func (p *Processor) subMpool(ctx context.Context) {
sub, err := p.node.MpoolSub(ctx)
if err != nil {
return
}
for {
var updates []api.MpoolUpdate
select {
case update := <-sub:
updates = append(updates, update)
case <-ctx.Done():
return
}
loop:
for {
time.Sleep(10 * time.Millisecond)
select {
case update := <-sub:
updates = append(updates, update)
default:
break loop
}
}
msgs := map[cid.Cid]*types.Message{}
for _, v := range updates {
if v.Type != api.MpoolAdd {
continue
}
msgs[v.Message.Message.Cid()] = &v.Message.Message
}
log.Debugf("Processing %d mpool updates", len(msgs))
err := p.storeMessages(msgs)
if err != nil {
log.Error(err)
}
if err := p.storeMpoolInclusions(updates); err != nil {
log.Error(err)
}
}
}
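subMpool batches updates: it blocks for the first one, then keeps draining the subscription with short sleeps until it goes quiet, and only then writes the batch. The same collection step, factored into a hypothetical helper for clarity (illustrative only):

// hypothetical sketch of the batching step above, not part of this change
func drainQuiet(ctx context.Context, sub <-chan api.MpoolUpdate) []api.MpoolUpdate {
	var updates []api.MpoolUpdate
	select {
	case u := <-sub:
		updates = append(updates, u)
	case <-ctx.Done():
		return updates
	}
	for {
		time.Sleep(10 * time.Millisecond)
		select {
		case u := <-sub:
			updates = append(updates, u)
		default:
			return updates
		}
	}
}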
func (p *Processor) storeMpoolInclusions(msgs []api.MpoolUpdate) error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create temp table mi (like mpool_messages excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
stmt, err := tx.Prepare(`copy mi (msg, add_ts) from stdin `)
if err != nil {
return err
}
for _, msg := range msgs {
if msg.Type != api.MpoolAdd {
continue
}
if _, err := stmt.Exec(
msg.Message.Message.Cid().String(),
time.Now().Unix(),
); err != nil {
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into mpool_messages select * from mi on conflict do nothing `); err != nil {
return xerrors.Errorf("actor put: %w", err)
}
return tx.Commit()
}

View File

@ -0,0 +1,344 @@
package processor
import (
"context"
"database/sql"
"encoding/json"
"sync"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/parmap"
)
var log = logging.Logger("processor")
type Processor struct {
db *sql.DB
node api.FullNode
genesisTs *types.TipSet
// number of blocks processed at a time
batch int
}
type ActorTips map[types.TipSetKey][]actorInfo
type actorInfo struct {
act types.Actor
stateroot cid.Cid
height abi.ChainEpoch // so that we can walk the actor changes in chronological order.
tsKey types.TipSetKey
parentTsKey types.TipSetKey
addr address.Address
state string
}
func NewProcessor(db *sql.DB, node api.FullNode, batch int) *Processor {
return &Processor{
db: db,
node: node,
batch: batch,
}
}
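// setupSchemas creates the database tables and views used by each of the sub-processors.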
func (p *Processor) setupSchemas() error {
if err := p.setupMarket(); err != nil {
return err
}
if err := p.setupMiners(); err != nil {
return err
}
if err := p.setupRewards(); err != nil {
return err
}
if err := p.setupMessages(); err != nil {
return err
}
if err := p.setupCommonActors(); err != nil {
return err
}
return nil
}
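// Start creates the schemas and then runs the main processing loop in the background:
// it repeatedly picks a batch of unprocessed blocks, collects the actor changes they contain,
// and hands those changes to the market, miner, reward, message and common-actor handlers.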
func (p *Processor) Start(ctx context.Context) {
log.Debug("Starting Processor")
if err := p.setupSchemas(); err != nil {
log.Fatalw("Failed to setup processor", "error", err)
}
var err error
p.genesisTs, err = p.node.ChainGetGenesis(ctx)
if err != nil {
log.Fatalw("Failed to get genesis state from lotus", "error", err.Error())
}
go p.subMpool(ctx)
// main processor loop
go func() {
for {
select {
case <-ctx.Done():
log.Debugw("Stopping Processor...")
return
default:
toProcess, err := p.unprocessedBlocks(ctx, p.batch)
if err != nil {
log.Fatalw("Failed to get unprocessed blocks", "error", err)
}
if len(toProcess) == 0 {
log.Debugw("No unprocessed blocks. Wait then try again...")
time.Sleep(time.Second * 10)
continue
}
// TODO: special-case genesis state handling here, before doing "normal" processing,
// to avoid all the special cases that will otherwise be needed for it elsewhere.
actorChanges, err := p.collectActorChanges(ctx, toProcess)
if err != nil {
log.Fatalw("Failed to collect actor changes", "error", err)
}
grp, ctx := errgroup.WithContext(ctx)
grp.Go(func() error {
if err := p.HandleMarketChanges(ctx, actorChanges[builtin.StorageMarketActorCodeID]); err != nil {
return xerrors.Errorf("Failed to handle market changes: %w", err)
}
return nil
})
grp.Go(func() error {
if err := p.HandleMinerChanges(ctx, actorChanges[builtin.StorageMinerActorCodeID]); err != nil {
return xerrors.Errorf("Failed to handle miner changes: %w", err)
}
return nil
})
grp.Go(func() error {
if err := p.HandleRewardChanges(ctx, actorChanges[builtin.RewardActorCodeID]); err != nil {
return xerrors.Errorf("Failed to handle reward changes: %w", err)
}
return nil
})
grp.Go(func() error {
if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
return xerrors.Errorf("Failed to handle message changes: %w", err)
}
return nil
})
grp.Go(func() error {
if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
return xerrors.Errorf("Failed to handle common actor changes: %w", err)
}
return nil
})
if err := grp.Wait(); err != nil {
log.Errorw("Failed to handle actor changes...retrying", "error", err)
continue
}
if err := p.markBlocksProcessed(ctx, toProcess); err != nil {
log.Fatalw("Failed to mark blocks as processed", "error", err)
}
if err := p.refreshViews(); err != nil {
log.Errorw("Failed to refresh views", "error", err)
}
}
}
}()
}
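// refreshViews refreshes the state_heights and miner_sectors_view materialized views.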
func (p *Processor) refreshViews() error {
if _, err := p.db.Exec(`refresh materialized view state_heights`); err != nil {
return err
}
if _, err := p.db.Exec(`refresh materialized view miner_sectors_view`); err != nil {
return err
}
return nil
}
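// collectActorChanges inspects each block header and records every actor whose state changed
// between the header's parent state and grandparent state, grouped by actor code and then by
// the tipset at which the change was observed.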
func (p *Processor) collectActorChanges(ctx context.Context, toProcess map[cid.Cid]*types.BlockHeader) (map[cid.Cid]ActorTips, error) {
start := time.Now()
defer func() {
log.Debugw("Collected Actor Changes", "duration", time.Since(start).String())
}()
// ActorCode -> tipset -> []actorInfo
out := map[cid.Cid]ActorTips{}
var outMu sync.Mutex
actorsSeen := map[cid.Cid]struct{}{}
// collect all actor state that has changes between block headers
paDone := 0
parmap.Par(50, parmap.MapArr(toProcess), func(bh *types.BlockHeader) {
paDone++
if paDone%100 == 0 {
log.Debugw("Collecting actor changes", "done", paDone, "percent", (paDone*100)/len(toProcess))
}
pts, err := p.node.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
if err != nil {
panic(err)
}
// collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
// TODO: changes will contain deleted actors; this causes needless processing further down the
// pipeline. Consider a separate strategy for deleted actors.
changes, err := p.node.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
if err != nil {
panic(err)
}
// record the state of all actors that have changed
for a, act := range changes {
act := act
a := a
addr, err := address.NewFromString(a)
if err != nil {
panic(err)
}
ast, err := p.node.StateReadState(ctx, addr, pts.Key())
if err != nil {
panic(err)
}
// TODO: look here for an empty state; maybe that's a sign the actor was deleted?
state, err := json.Marshal(ast.State)
if err != nil {
panic(err)
}
outMu.Lock()
if _, ok := actorsSeen[act.Head]; !ok {
_, ok := out[act.Code]
if !ok {
out[act.Code] = map[types.TipSetKey][]actorInfo{}
}
out[act.Code][pts.Key()] = append(out[act.Code][pts.Key()], actorInfo{
act: act,
stateroot: bh.ParentStateRoot,
height: bh.Height,
tsKey: pts.Key(),
parentTsKey: pts.Parents(),
addr: addr,
state: string(state),
})
}
actorsSeen[act.Head] = struct{}{}
outMu.Unlock()
}
})
return out, nil
}
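// unprocessedBlocks returns up to `batch` of the lowest-height blocks that have been synced
// but not yet processed, fetching their headers from the node.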
func (p *Processor) unprocessedBlocks(ctx context.Context, batch int) (map[cid.Cid]*types.BlockHeader, error) {
start := time.Now()
defer func() {
log.Debugw("Gathered Blocks to process", "duration", time.Since(start).String())
}()
rows, err := p.db.Query(`
with toProcess as (
select blocks.cid, blocks.height, rank() over (order by height) as rnk
from blocks
left join blocks_synced bs on blocks.cid = bs.cid
where bs.processed_at is null and blocks.height > 0
)
select cid
from toProcess
where rnk <= $1
`, batch)
if err != nil {
return nil, xerrors.Errorf("Failed to query for unprocessed blocks: %w", err)
}
out := map[cid.Cid]*types.BlockHeader{}
// TODO consider parallel execution here for getting the blocks from the api as is done in fetchMessages()
for rows.Next() {
if rows.Err() != nil {
return nil, rows.Err()
}
var c string
if err := rows.Scan(&c); err != nil {
return nil, xerrors.Errorf("Failed to scan unprocessed blocks: %w", err)
}
ci, err := cid.Parse(c)
if err != nil {
return nil, xerrors.Errorf("Failed to parse unprocessed blocks: %w", err)
}
bh, err := p.node.ChainGetBlock(ctx, ci)
if err != nil {
// this is a pretty serious issue.
return nil, xerrors.Errorf("Failed to get block header %s: %w", ci.String(), err)
}
out[ci] = bh
}
return out, rows.Close()
}
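// markBlocksProcessed stamps the current time as processed_at for each block in blocks_synced.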
func (p *Processor) markBlocksProcessed(ctx context.Context, processed map[cid.Cid]*types.BlockHeader) error {
start := time.Now()
defer func() {
log.Debugw("Marked blocks as Processed", "duration", time.Since(start).String())
}()
tx, err := p.db.Begin()
if err != nil {
return err
}
processedAt := time.Now().Unix()
stmt, err := tx.Prepare(`update blocks_synced set processed_at=$1 where cid=$2`)
if err != nil {
return err
}
for c := range processed {
if _, err := stmt.Exec(processedAt, c.String()); err != nil {
return err
}
}
if err := stmt.Close(); err != nil {
return err
}
return tx.Commit()
}

View File

@ -0,0 +1,234 @@
package processor
import (
"bytes"
"context"
"time"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
)
type rewardActorInfo struct {
common actorInfo
// expected power in bytes during this epoch
baselinePower big.Int
// base reward in attofil for each block found during this epoch
baseBlockReward big.Int
}
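// setupRewards creates the base_block_rewards and chain_power tables and the
// top_miners_by_base_reward materialized view.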
func (p *Processor) setupRewards() error {
tx, err := p.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
/*
* captures base block reward per miner per state root and does not
* include penalties or gas reward
*/
create table if not exists base_block_rewards
(
state_root text not null
constraint block_rewards_pk
primary key,
base_block_reward numeric not null
);
/* captures chain-specific power state for any given stateroot */
create table if not exists chain_power
(
state_root text not null
constraint chain_power_pk
primary key,
baseline_power text not null
);
create materialized view if not exists top_miners_by_base_reward as
with total_rewards_by_miner as (
select
b.miner,
sum(bbr.base_block_reward) as total_reward
from blocks b
inner join base_block_rewards bbr on b.parentstateroot = bbr.state_root
group by 1
) select
rank() over (order by total_reward desc),
miner,
total_reward
from total_rewards_by_miner
group by 2, 3;
create index if not exists top_miners_by_base_reward_miner_index
on top_miners_by_base_reward (miner);
`); err != nil {
return err
}
return tx.Commit()
}
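// HandleRewardChanges extracts the reward actor state for each tipset in which it changed
// and persists the resulting chain power and block reward data.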
func (p *Processor) HandleRewardChanges(ctx context.Context, rewardTips ActorTips) error {
rewardChanges, err := p.processRewardActors(ctx, rewardTips)
if err != nil {
log.Fatalw("Failed to process reward actors", "error", err)
}
if err := p.persistRewardActors(ctx, rewardChanges); err != nil {
return err
}
return nil
}
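// processRewardActors loads the reward actor state at each tipset where it changed.
// The baseline power and base block reward are currently zeroed pending the actors API
// change noted in the TODO below.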
func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTips) ([]rewardActorInfo, error) {
start := time.Now()
defer func() {
log.Debugw("Processed Reward Actors", "duration", time.Since(start).String())
}()
var out []rewardActorInfo
for tipset, rewards := range rewardTips {
for _, act := range rewards {
var rw rewardActorInfo
rw.common = act
// get reward actor states at each tipset once for all updates
rewardActor, err := p.node.StateGetActor(ctx, builtin.RewardActorAddr, tipset)
if err != nil {
return nil, xerrors.Errorf("get reward state (@ %s): %w", rw.common.stateroot.String(), err)
}
rewardStateRaw, err := p.node.ChainReadObj(ctx, rewardActor.Head)
if err != nil {
return nil, xerrors.Errorf("read state obj (@ %s): %w", rw.common.stateroot.String(), err)
}
var rewardActorState reward.State
if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
}
// TODO: Resolve Actor API shift
//rw.baseBlockReward = rewardActorState.LastPerEpochReward
//rw.baselinePower = rewardActorState.BaselinePower
rw.baseBlockReward = big.Zero()
rw.baselinePower = big.Zero()
out = append(out, rw)
}
}
return out, nil
}
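// persistRewardActors stores chain power and base block rewards concurrently.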
func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardActorInfo) error {
start := time.Now()
defer func() {
log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
}()
grp, ctx := errgroup.WithContext(ctx)
grp.Go(func() error {
if err := p.storeChainPower(rewards); err != nil {
return err
}
return nil
})
grp.Go(func() error {
if err := p.storeBaseBlockReward(rewards); err != nil {
return err
}
return nil
})
return grp.Wait()
}
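// storeChainPower writes the baseline power for each state root, staging rows in a temp
// table and inserting them into chain_power, ignoring duplicates.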
func (p *Processor) storeChainPower(rewards []rewardActorInfo) error {
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin chain_power tx: %w", err)
}
if _, err := tx.Exec(`create temp table cp (like chain_power excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep chain_power temp: %w", err)
}
stmt, err := tx.Prepare(`copy cp (state_root, baseline_power) from STDIN`)
if err != nil {
return xerrors.Errorf("prepare tmp chain_power: %w", err)
}
for _, rewardState := range rewards {
if _, err := stmt.Exec(
rewardState.common.stateroot.String(),
rewardState.baselinePower.String(),
); err != nil {
log.Errorw("failed to store chain power", "state_root", rewardState.common.stateroot, "error", err)
}
}
if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared chain_power: %w", err)
}
if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
return xerrors.Errorf("insert chain_power from tmp: %w", err)
}
if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit chain_power tx: %w", err)
}
return nil
}
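// storeBaseBlockReward divides the per-epoch reward by the expected number of blocks per
// epoch and stores the result for each state root.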
func (p *Processor) storeBaseBlockReward(rewards []rewardActorInfo) error {
tx, err := p.db.Begin()
if err != nil {
return xerrors.Errorf("begin base_block_reward tx: %w", err)
}
if _, err := tx.Exec(`create temp table bbr (like base_block_rewards excluding constraints) on commit drop`); err != nil {
return xerrors.Errorf("prep base_block_reward temp: %w", err)
}
stmt, err := tx.Prepare(`copy bbr (state_root, base_block_reward) from STDIN`)
if err != nil {
return xerrors.Errorf("prepare tmp base_block_reward: %w", err)
}
for _, rewardState := range rewards {
baseBlockReward := big.Div(rewardState.baseBlockReward, big.NewIntUnsigned(build.BlocksPerEpoch))
if _, err := stmt.Exec(
rewardState.common.stateroot.String(),
baseBlockReward.String(),
); err != nil {
log.Errorw("failed to store base block reward", "state_root", rewardState.common.stateroot, "error", err)
}
}
if err := stmt.Close(); err != nil {
return xerrors.Errorf("close prepared base_block_reward: %w", err)
}
if _, err := tx.Exec(`insert into base_block_rewards select * from bbr on conflict do nothing`); err != nil {
return xerrors.Errorf("insert base_block_reward from tmp: %w", err)
}
if err := tx.Commit(); err != nil {
return xerrors.Errorf("commit base_block_reward tx: %w", err)
}
return nil
}

View File

@ -0,0 +1,77 @@
package main
import (
"database/sql"
"os"
_ "github.com/lib/pq"
lcli "github.com/filecoin-project/lotus/cli"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/syncer"
)
var runCmd = &cli.Command{
Name: "run",
Usage: "Start lotus chainwatch",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "max-batch",
Value: 1000,
},
},
Action: func(cctx *cli.Context) error {
ll := cctx.String("log-level")
if err := logging.SetLogLevel("*", ll); err != nil {
return err
}
if err := logging.SetLogLevel("rpc", "error"); err != nil {
return err
}
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
v, err := api.Version(ctx)
if err != nil {
return err
}
log.Infof("Remote version: %s", v.Version)
maxBatch := cctx.Int("max-batch")
db, err := sql.Open("postgres", cctx.String("db"))
if err != nil {
return err
}
defer func() {
if err := db.Close(); err != nil {
log.Errorw("Failed to close database", "error", err)
}
}()
if err := db.Ping(); err != nil {
return xerrors.Errorf("Database failed to respond to ping (is it online?): %w", err)
}
db.SetMaxOpenConns(1350)
sync := syncer.NewSyncer(db, api)
sync.Start(ctx)
proc := processor.NewProcessor(db, api, maxBatch)
proc.Start(ctx)
<-ctx.Done()
os.Exit(0)
return nil
},
}

View File

@ -1,61 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Lotus ChainWatch</title>
<link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$cid := param "cid"}}
<div class="Index">
<div class="Index-header">
<div>
<span>Lotus ChainWatch - Wallets</span>
</div>
</div>
<div class="Index-nodes">
<div class="Index-node">
<div>Miner: {{index (strings "blocks" "miner" "cid=$1" $cid) 0}}</div>
<div>Parents:</div>
<div>
{{range strings "block_parents" "parent" "block=$1" $cid}}
{{$parent := .}}
<a href="block.html?cid={{$parent}}">{{. | substr 54 62}}</a>
{{end}}
</div>
<div>Messages:</div>
<table>
{{range strings "block_messages" "message" "block=$1" $cid}}
{{$msg := .}}
<tr>
<td><a href="message.html?cid={{$msg}}">{{$msg | substr 54 62}}</a></td>
<td>
{{$from := qstr "select \"from\" from messages where cid=$1" $msg}}
{{$nonce := qstr "select nonce from messages where cid=$1" $msg}}
<a href="key.html?w={{$from}}">{{$from}}</a> (N:{{$nonce}})
</td>
<td>-&gt;</td>
<td>
{{$to := qstr "select \"to\" from messages where cid=$1" $msg}}
<a href="key.html?w={{$to}}">{{$to}}</a>
</td>
<td>
Method:<b>{{qstr "select method from messages where cid=$1" $msg}}</b>
</td>
{{$rec := qstrs `select r.exit, r.gas_used from messages
inner join block_messages bm on messages.cid = bm.message
inner join blocks b on bm.block = b.cid
inner join block_parents bp on b.cid = bp.parent
inner join blocks chd on bp.block = chd.cid
inner join receipts r on messages.cid = r.msg and chd.parentStateRoot = r.state
where messages.cid=$1 and b.cid=$2` 2 $msg $cid}}
<td>exit:<b>{{index $rec 0}}</b></td>
<td>gasUsed:<b>{{index $rec 1}}</b></td>
</tr>
{{end}}
</table>
</div>
</div>
</div>
</body>
</html>

View File

@ -1,43 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Lotus ChainWatch</title>
<link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$start := param "start" | parseInt}}
<div class="Index">
<div class="Index-header">
<div>
<span>Lotus ChainWatch - Wallets</span>
</div>
</div>
<div class="Index-nodes">
<div class="Index-node">
<table>
{{range pageDown $start 50}}
<tr>
<td>
{{$h := .}}
{{$h}};
</td>
<td>
<b>{{qstr `select count(distinct block_messages.message) from block_messages
inner join blocks b on block_messages.block = b.cid
where b.height = $1` $h}}</b> Msgs
</td>
<td>
{{range strings "blocks" "cid" "height = $1" $h}}
<a href="block.html?cid={{.}}">{{. | substr 54 62}}</a>
{{end}}
</td>
</tr>
{{end}}
</table>
<a href="blocks.html?start={{sub $start 50}}">Next 50</a>
</div>
</div>
</div>
</body>
</html>

View File

@ -1,37 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Lotus ChainWatch</title>
<link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
<div class="Index">
<div class="Index-header">
<div>
<span>Lotus ChainWatch</span>
</div>
</div>
<div class="Index-nodes">
<div class="Index-node">
<b>{{countCol "actors" "id"}}</b> Actors;
<b>{{countCol "miner_heads" "addr"}}</b> Miners;
<b>{{netPower "slashed_at = 0" | sizeStr}}</b> Power
(<b>{{netPower "" | sizeStr}}</b> Total;
<b>{{netPower "slashed_at > 0" | sizeStr}}</b> Slashed)
</div>
<div class="Index-node">
{{count "messages"}} Messages; {{count "actors"}} state changes
</div>
<div class="Index-node">
{{count "id_address_map" "id != address"}} <a href="keys.html">Keys</a>;
E% FIL in wallets; F% FIL in miners; M% in market; %G Other actors; %H FIL it treasury
</div>
<div class="Index-node">
{{$maxH := queryNum "select max(height) from blocks inner join blocks_synced bs on blocks.cid = bs.cid"}}
{{count "blocks"}} <a href="blocks.html?start={{$maxH}}">Blocks</a>; Current Height: {{$maxH}};
</div>
</div>
</div>
</body>
</html>

View File

@ -1,40 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Lotus ChainWatch</title>
<link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$wallet := param "w"}}
<div class="Index">
<div class="Index-header">
<div>
<span>Lotus ChainWatch - Wallet {{$wallet}}</span>
</div>
</div>
<div class="Index-nodes">
<div class="Index-node">
Balance: {{queryNum "select balance from actors inner join id_address_map m on m.address = $1 where actors.id = m.id order by nonce desc limit 1" $wallet }}
</div>
<div class="Index-node">
Messages:
<table>
<tr><td>Dir</td><td>Peer</td><td>Nonce</td><td>Value</td><td>Block</td><td>Mpool Wait</td></tr>
{{ range messages "\"from\" = $1 or \"to\" = $1" $wallet}}
<tr>
{{ if eq .From.String $wallet }}
<td>To</td><td><a href="key.html?w={{.To.String}}">{{.To.String}}</a></td>
{{else}}
<td>From</td><td><a href="key.html?w={{.From.String}}">{{.From.String}}</a></td>
{{end}}
<td>{{.Nonce}}</td>
<td>{{.Value}}</td>
</tr>
{{end}}
</table>
</div>
</div>
</div>
</body>
</html>

View File

@ -1,28 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>Lotus ChainWatch</title>
<link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
<div class="Index">
<div class="Index-header">
<div>
<span>Lotus ChainWatch - Wallets</span>
</div>
</div>
<div class="Index-nodes">
<div class="Index-node">
{{range strings "id_address_map" "address" "address != id"}}
{{$addr := .}}
<div>
<a href="key.html?w={{$addr}}">{{$addr}}</a>
<span><b>{{qstr "select count(distinct cid) from messages where \"from\"=$1" $addr}}</b> outmsgs;</span>
<span><b>{{qstr "select count(distinct cid) from messages where \"to\"=$1" $addr}}</b> inmsgs</span>
</div>
{{end}}
</div>
</div>
</div>
</body>
</html>

View File

@ -1,66 +0,0 @@
body {
font-family: 'monospace';
background: #1f1f1f;
color: #f0f0f0;
padding: 0;
margin: 0;
}
b {
color: #aff;
}
.Index {
width: 100vw;
height: 100vh;
background: #1a1a1a;
color: #f0f0f0;
font-family: monospace;
overflow: auto;
display: grid;
grid-template-columns: auto 80vw auto;
grid-template-rows: 3em auto auto auto;
grid-template-areas:
"header header header header"
". . . ."
". main main ."
". main main ."
". main main ."
". main main ."
". main main ."
". . . .";
}
.Index-header {
background: #2a2a2a;
grid-area: header;
}
.Index-Index-header > div {
padding-left: 0.7em;
padding-top: 0.7em;
}
.Index-nodes {
grid-area: main;
background: #2a2a2a;
}
.Index-node {
margin: 5px;
padding: 15px;
background: #1f1f1f;
}
a:link {
color: #50f020;
}
a:visited {
color: #50f020;
}
a:hover {
color: #30a00a;
}

File diff suppressed because it is too large

View File

@ -1,747 +0,0 @@
package main
import (
"bytes"
"container/list"
"context"
"encoding/json"
"fmt"
"math"
"sort"
"sync"
"time"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
parmap "github.com/filecoin-project/lotus/lib/parmap"
)
func runSyncer(ctx context.Context, api api.FullNode, st *storage, maxBatch int) {
notifs, err := api.ChainNotify(ctx)
if err != nil {
panic(err)
}
go func() {
for notif := range notifs {
for _, change := range notif {
switch change.Type {
case store.HCCurrent:
fallthrough
case store.HCApply:
syncHead(ctx, api, st, change.Val, maxBatch)
case store.HCRevert:
log.Warnf("revert todo")
}
if change.Type == store.HCCurrent {
go subMpool(ctx, api, st)
go subBlocks(ctx, api, st)
}
}
}
}()
}
type rewardStateInfo struct {
stateroot cid.Cid
baselinePower big.Int
}
type minerStateInfo struct {
// common
addr address.Address
act types.Actor
stateroot cid.Cid
// calculating changes
tsKey types.TipSetKey
parentTsKey types.TipSetKey
// miner specific
state miner.State
info *miner.MinerInfo
// tracked by power actor
rawPower big.Int
qalPower big.Int
ssize uint64
psize uint64
}
type marketStateInfo struct {
// common
act types.Actor
stateroot cid.Cid
// calculating changes
tsKey types.TipSetKey
parentTsKey types.TipSetKey
// market actor specific
state market.State
}
type actorInfo struct {
stateroot cid.Cid
tsKey types.TipSetKey
parentTsKey types.TipSetKey
state string
}
type tipsetKeyHeight struct {
height abi.ChainEpoch
tsKey types.TipSetKey
}
func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.TipSet, maxBatch int) {
var alk sync.Mutex
log.Infof("Getting synced block list")
hazlist := st.hasList()
log.Infof("Getting headers / actors")
// global list of all blocks that need to be synced
allToSync := map[cid.Cid]*types.BlockHeader{}
// a stack
toVisit := list.New()
for _, header := range headTs.Blocks() {
toVisit.PushBack(header)
}
// TODO consider making a db query to check where syncing left off at in the case of a restart and avoid reprocessing
// those entries, or write value to file on shutdown
// walk the entire chain starting from headTS
for toVisit.Len() > 0 {
bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)
_, has := hazlist[bh.Cid()]
if _, seen := allToSync[bh.Cid()]; seen || has {
continue
}
allToSync[bh.Cid()] = bh
if len(allToSync)%500 == 10 {
log.Debugf("to visit: (%d) %s @%d", len(allToSync), bh.Cid(), bh.Height)
}
if len(bh.Parents) == 0 {
continue
}
pts, err := api.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
if err != nil {
log.Error(err)
continue
}
for _, header := range pts.Blocks() {
toVisit.PushBack(header)
}
}
// Main worker loop; it runs until all tipsets from headTs to genesis have been processed.
for len(allToSync) > 0 {
// first map is addresses -> common actors states (head, code, balance, nonce)
// second map common actor states -> chain state (tipset, stateroot) & unique actor state (deserialization of their head CID) represented as json.
actors := map[address.Address]map[types.Actor]actorInfo{}
// map of actor public key address to ID address
addressToID := map[address.Address]address.Address{}
minH := abi.ChainEpoch(math.MaxInt64)
// find the blockheader with the lowest height
for _, header := range allToSync {
if header.Height < minH {
minH = header.Height
}
}
// toSync maps block cids to their headers and contains all block headers that will be synced in this batch
// `maxBatch` is a tunable parameter to control how many blocks we sync per iteration.
toSync := map[cid.Cid]*types.BlockHeader{}
for c, header := range allToSync {
if header.Height < minH+abi.ChainEpoch(maxBatch) {
toSync[c] = header
addressToID[header.Miner] = address.Undef
}
}
// remove everything we are syncing this round from the global list of blocks to sync
for c := range toSync {
delete(allToSync, c)
}
log.Infow("Starting Sync", "height", minH, "numBlocks", len(toSync), "maxBatch", maxBatch)
// relate tipset keys to height so they may be processed in ascending order.
var tipHeights []tipsetKeyHeight
tipsSeen := make(map[types.TipSetKey]struct{})
// map of addresses to changed actors
var changes map[string]types.Actor
// collect all actor state that has changes between block headers
paDone := 0
parmap.Par(50, parmap.MapArr(toSync), func(bh *types.BlockHeader) {
paDone++
if paDone%100 == 0 {
log.Infof("pa: %d %d%%", paDone, (paDone*100)/len(toSync))
}
if len(bh.Parents) == 0 { // genesis case
genesisTs, _ := types.NewTipSet([]*types.BlockHeader{bh})
st.genesisTs = genesisTs
aadrs, err := api.StateListActors(ctx, genesisTs.Key())
if err != nil {
log.Error(err)
return
}
// TODO suspicious there is not a lot to be gained by doing this in parallel since the genesis state
// is unlikely to contain a lot of actors, why not for loop here?
parmap.Par(50, aadrs, func(addr address.Address) {
act, err := api.StateGetActor(ctx, addr, genesisTs.Key())
if err != nil {
log.Error(err)
return
}
ast, err := api.StateReadState(ctx, addr, genesisTs.Key())
if err != nil {
log.Error(err)
return
}
state, err := json.Marshal(ast.State)
if err != nil {
log.Error(err)
return
}
alk.Lock()
_, ok := actors[addr]
if !ok {
actors[addr] = map[types.Actor]actorInfo{}
}
actors[addr][*act] = actorInfo{
stateroot: bh.ParentStateRoot,
tsKey: genesisTs.Key(),
parentTsKey: genesisTs.Key(),
state: string(state),
}
addressToID[addr] = address.Undef
alk.Unlock()
})
return
}
pts, err := api.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
if err != nil {
log.Error(err)
return
}
// TODO Does this return actors that have been deleted between states?
// collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
changes, err = api.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
if err != nil {
log.Error(err)
return
}
// record the state of all actors that have changed
for a, act := range changes {
act := act
addr, err := address.NewFromString(a)
if err != nil {
log.Error(err)
return
}
ast, err := api.StateReadState(ctx, addr, pts.Key())
if err != nil {
log.Error(err)
return
}
state, err := json.Marshal(ast.State)
if err != nil {
log.Error(err)
return
}
alk.Lock()
_, ok := actors[addr]
if !ok {
actors[addr] = map[types.Actor]actorInfo{}
}
// a change occurred for the actor with address `addr` and state `act` at tipset `pts`.
actors[addr][act] = actorInfo{
stateroot: bh.ParentStateRoot,
state: string(state),
tsKey: pts.Key(),
parentTsKey: pts.Parents(),
}
addressToID[addr] = address.Undef
if _, ok := tipsSeen[pts.Key()]; !ok {
tipHeights = append(tipHeights, tipsetKeyHeight{
height: pts.Height(),
tsKey: pts.Key(),
})
}
tipsSeen[pts.Key()] = struct{}{}
alk.Unlock()
}
})
// sort tipHeights in ascending order.
sort.Slice(tipHeights, func(i, j int) bool {
return tipHeights[i].height < tipHeights[j].height
})
// map of tipset to reward state
rewardTips := make(map[types.TipSetKey]*rewardStateInfo, len(changes))
// map of tipset to all miners that had a head-change at that tipset.
minerTips := make(map[types.TipSetKey][]*minerStateInfo, len(changes))
// heads we've seen, im being paranoid
headsSeen := make(map[cid.Cid]struct{}, len(actors))
log.Infof("Getting messages")
msgs, incls := fetchMessages(ctx, api, toSync)
log.Infof("Resolving addresses")
for _, message := range msgs {
addressToID[message.To] = address.Undef
addressToID[message.From] = address.Undef
}
parmap.Par(50, parmap.KMapArr(addressToID), func(addr address.Address) {
// FIXME: cannot use EmptyTSK here since actorID's can change during reorgs, need to use the corresponding tipset.
// TODO: figure out a way to get the corresponding tipset...
raddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK)
if err != nil {
log.Warn(err)
return
}
alk.Lock()
addressToID[addr] = raddr
alk.Unlock()
})
log.Infof("Getting actor change info")
// highly likely that the market actor will change at every epoch
marketActorChanges := make(map[types.TipSetKey]*marketStateInfo, len(changes))
minerChanges := 0
for addr, m := range actors {
for actor, c := range m {
// only want actors with head change events
if _, found := headsSeen[actor.Head]; found {
continue
}
headsSeen[actor.Head] = struct{}{}
switch actor.Code {
case builtin.StorageMarketActorCodeID:
marketActorChanges[c.tsKey] = &marketStateInfo{
act: actor,
stateroot: c.stateroot,
tsKey: c.tsKey,
parentTsKey: c.parentTsKey,
state: market.State{},
}
case builtin.StorageMinerActorCodeID:
minerChanges++
minerTips[c.tsKey] = append(minerTips[c.tsKey], &minerStateInfo{
addr: addr,
act: actor,
stateroot: c.stateroot,
tsKey: c.tsKey,
parentTsKey: c.parentTsKey,
state: miner.State{},
info: nil,
rawPower: big.Zero(),
qalPower: big.Zero(),
})
// reward actor
case builtin.RewardActorCodeID:
rewardTips[c.tsKey] = &rewardStateInfo{
stateroot: c.stateroot,
baselinePower: big.Zero(),
}
}
}
}
rewardProcessingStartedAt := time.Now()
parmap.Par(50, parmap.KVMapArr(rewardTips), func(it func() (types.TipSetKey, *rewardStateInfo)) {
tsKey, rewardInfo := it()
// get reward actor states at each tipset once for all updates
rewardActor, err := api.StateGetActor(ctx, builtin.RewardActorAddr, tsKey)
if err != nil {
log.Error(xerrors.Errorf("get reward state (@ %s): %w", rewardInfo.stateroot.String(), err))
return
}
rewardStateRaw, err := api.ChainReadObj(ctx, rewardActor.Head)
if err != nil {
log.Error(xerrors.Errorf("read state obj (@ %s): %w", rewardInfo.stateroot.String(), err))
return
}
var rewardActorState reward.State
if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
log.Error(xerrors.Errorf("unmarshal state (@ %s): %w", rewardInfo.stateroot.String(), err))
return
}
rewardInfo.baselinePower = rewardActorState.BaselinePower
})
log.Infow("Completed Reward Processing", "duration", time.Since(rewardProcessingStartedAt).String(), "processed", len(rewardTips))
minerProcessingStartedAt := time.Now()
log.Infow("Processing miners", "numTips", len(minerTips), "numMinerChanges", minerChanges)
// extract the power actor state at each tipset, loop over all miners that changed at said tipset and extract their
// claims from the power actor state. This ensures we only fetch the power actors state once for each tipset.
parmap.Par(50, parmap.KVMapArr(minerTips), func(it func() (types.TipSetKey, []*minerStateInfo)) {
tsKey, minerInfo := it()
// get the power actors claims map
mp, err := getPowerActorClaimsMap(ctx, api, tsKey)
if err != nil {
log.Error(err)
return
}
// Get miner raw and quality power
for _, mi := range minerInfo {
var claim power.Claim
// get miner claim from power actors claim map and store if found, else the miner had no claim at
// this tipset
found, err := mp.Get(adt.AddrKey(mi.addr), &claim)
if err != nil {
log.Error(err)
}
if found {
mi.qalPower = claim.QualityAdjPower
mi.rawPower = claim.RawBytePower
}
// Get the miner state info
astb, err := api.ChainReadObj(ctx, mi.act.Head)
if err != nil {
log.Error(err)
return
}
if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
log.Error(err)
return
}
mi.info, err = mi.state.GetInfo(&apiIpldStore{ctx, api})
if err != nil {
log.Error(err)
return
}
}
// TODO Get the Sector Count
// FIXME this is returning a lot of "address not found" errors, which is strange given that StateChangedActors
// returns all actors that had a state change at tipset `k.tsKey`; maybe it's returning deleted miners too??
/*
sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
if err != nil {
info.psize = 0
info.ssize = 0
} else {
info.psize = sszs.Pset
info.ssize = sszs.Sset
}
*/
})
log.Infow("Completed Miner Processing", "duration", time.Since(minerProcessingStartedAt).String(), "processed", minerChanges)
log.Info("Getting market actor info")
// TODO: consider taking the min of the array length and using that for concurrency param, e.g:
// concurrency := math.Min(len(marketActorChanges), 50)
parmap.Par(50, parmap.MapArr(marketActorChanges), func(mrktInfo *marketStateInfo) {
astb, err := api.ChainReadObj(ctx, mrktInfo.act.Head)
if err != nil {
log.Error(err)
return
}
if err := mrktInfo.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
log.Error(err)
return
}
})
log.Info("Getting receipts")
receipts := fetchParentReceipts(ctx, api, toSync)
log.Info("Storing headers")
if err := st.storeHeaders(toSync, true); err != nil {
log.Errorf("%+v", err)
return
}
log.Info("Storing address mapping")
if err := st.storeAddressMap(addressToID); err != nil {
log.Error(err)
return
}
log.Info("Storing actors")
if err := st.storeActors(actors); err != nil {
log.Error(err)
return
}
chainPowerStartedAt := time.Now()
if err := st.storeChainPower(rewardTips); err != nil {
log.Error(err)
}
log.Infow("Stored chain power", "duration", time.Since(chainPowerStartedAt).String())
log.Info("Storing miners")
if err := st.storeMiners(minerTips); err != nil {
log.Error(err)
return
}
minerPowerStartedAt := time.Now()
if err := st.storeMinerPower(minerTips); err != nil {
log.Error(err)
}
log.Infow("Stored miner power", "duration", time.Since(minerPowerStartedAt).String())
sectorStart := time.Now()
if err := st.storeSectors(minerTips, api); err != nil {
log.Error(err)
return
}
log.Infow("Stored miner sectors", "duration", time.Since(sectorStart).String())
log.Info("Storing miner sectors heads")
if err := st.storeMinerSectorsHeads(minerTips, api); err != nil {
log.Error(err)
return
}
log.Info("updating miner sectors heads")
if err := st.updateMinerSectors(minerTips, api); err != nil {
log.Error(err)
return
}
log.Info("Storing market actor deal proposal info")
if err := st.storeMarketActorDealProposals(marketActorChanges, tipHeights, api); err != nil {
log.Error(err)
return
}
log.Info("Storing market actor deal state info")
if err := st.storeMarketActorDealStates(marketActorChanges, tipHeights, api); err != nil {
log.Error(err)
return
}
log.Info("Updating market actor deal proposal info")
if err := st.updateMarketActorDealProposals(marketActorChanges, tipHeights, api); err != nil {
log.Error(err)
return
}
log.Infof("Storing messages")
if err := st.storeMessages(msgs); err != nil {
log.Error(err)
return
}
log.Info("Storing message inclusions")
if err := st.storeMsgInclusions(incls); err != nil {
log.Error(err)
return
}
log.Infof("Storing parent receipts")
if err := st.storeReceipts(receipts); err != nil {
log.Error(err)
return
}
log.Infof("Sync stage done")
}
log.Infof("Get deals")
// TODO: incremental, gather expired
deals, err := api.StateMarketDeals(ctx, headTs.Key())
if err != nil {
log.Error(err)
return
}
log.Infof("Store deals")
if err := st.storeDeals(deals); err != nil {
log.Error(err)
return
}
log.Infof("Refresh views")
if err := st.refreshViews(); err != nil {
log.Error(err)
return
}
log.Infof("Sync done")
}
func fetchMessages(ctx context.Context, api api.FullNode, toSync map[cid.Cid]*types.BlockHeader) (map[cid.Cid]*types.Message, map[cid.Cid][]cid.Cid) {
var lk sync.Mutex
messages := map[cid.Cid]*types.Message{}
inclusions := map[cid.Cid][]cid.Cid{} // block -> msgs
parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
msgs, err := api.ChainGetBlockMessages(ctx, header.Cid())
if err != nil {
log.Error(err)
return
}
vmm := make([]*types.Message, 0, len(msgs.Cids))
for _, m := range msgs.BlsMessages {
vmm = append(vmm, m)
}
for _, m := range msgs.SecpkMessages {
vmm = append(vmm, &m.Message)
}
lk.Lock()
for _, message := range vmm {
messages[message.Cid()] = message
inclusions[header.Cid()] = append(inclusions[header.Cid()], message.Cid())
}
lk.Unlock()
})
return messages, inclusions
}
type mrec struct {
msg cid.Cid
state cid.Cid
idx int
}
func fetchParentReceipts(ctx context.Context, api api.FullNode, toSync map[cid.Cid]*types.BlockHeader) map[mrec]*types.MessageReceipt {
var lk sync.Mutex
out := map[mrec]*types.MessageReceipt{}
parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
recs, err := api.ChainGetParentReceipts(ctx, header.Cid())
if err != nil {
log.Error(err)
return
}
msgs, err := api.ChainGetParentMessages(ctx, header.Cid())
if err != nil {
log.Error(err)
return
}
lk.Lock()
for i, r := range recs {
out[mrec{
msg: msgs[i].Cid,
state: header.ParentStateRoot,
idx: i,
}] = r
}
lk.Unlock()
})
return out
}
// load the power actor state claims as an adt.Map at the tipset `ts`.
func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
if err != nil {
return nil, err
}
powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
if err != nil {
return nil, err
}
var powerActorState power.State
if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
}
s := &apiIpldStore{ctx, api}
return adt.AsMap(s, powerActorState.Claims)
}
// required for AMT and HAMT access
// TODO extract this to a common location in lotus and reuse the code
type apiIpldStore struct {
ctx context.Context
api api.FullNode
}
func (ht *apiIpldStore) Context() context.Context {
return ht.ctx
}
func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
raw, err := ht.api.ChainReadObj(ctx, c)
if err != nil {
return err
}
cu, ok := out.(cbg.CBORUnmarshaler)
if ok {
if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
return err
}
return nil
}
return fmt.Errorf("Object does not implement CBORUnmarshaler: %T", out)
}
func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
return cid.Undef, fmt.Errorf("Put is not implemented on apiIpldStore")
}

View File

@ -1,25 +1,24 @@
package main
package syncer
import (
"context"
"time"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
aapi "github.com/filecoin-project/lotus/api"
)
func subBlocks(ctx context.Context, api aapi.FullNode, st *storage) {
sub, err := api.SyncIncomingBlocks(ctx)
func (s *Syncer) subBlocks(ctx context.Context) {
sub, err := s.node.SyncIncomingBlocks(ctx)
if err != nil {
log.Error(err)
return
}
for bh := range sub {
err := st.storeHeaders(map[cid.Cid]*types.BlockHeader{
err := s.storeHeaders(map[cid.Cid]*types.BlockHeader{
bh.Cid(): bh,
}, false)
}, false, time.Now())
if err != nil {
log.Errorf("%+v", err)
}

View File

@ -0,0 +1,446 @@
package syncer
import (
"container/list"
"context"
"database/sql"
"sync"
"time"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
var log = logging.Logger("syncer")
type Syncer struct {
db *sql.DB
headerLk sync.Mutex
node api.FullNode
}
func NewSyncer(db *sql.DB, node api.FullNode) *Syncer {
return &Syncer{
db: db,
node: node,
}
}
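// setupSchemas creates the block header tables (block_cids, blocks_synced, block_parents,
// drand_entries, block_drand_entries, blocks) and the state_heights materialized view.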
func (s *Syncer) setupSchemas() error {
tx, err := s.db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec(`
create table if not exists block_cids
(
cid text not null
constraint block_cids_pk
primary key
);
create unique index if not exists block_cids_cid_uindex
on block_cids (cid);
create table if not exists blocks_synced
(
cid text not null
constraint blocks_synced_pk
primary key
constraint blocks_block_cids_cid_fk
references block_cids (cid),
synced_at int not null,
processed_at int
);
create unique index if not exists blocks_synced_cid_uindex
on blocks_synced (cid,processed_at);
create table if not exists block_parents
(
block text not null
constraint blocks_block_cids_cid_fk
references block_cids (cid),
parent text not null
);
create unique index if not exists block_parents_block_parent_uindex
on block_parents (block, parent);
create table if not exists drand_entries
(
round bigint not null
constraint drand_entries_pk
primary key,
data bytea not null
);
create unique index if not exists drand_entries_round_uindex
on drand_entries (round);
create table if not exists block_drand_entries
(
round bigint not null
constraint block_drand_entries_drand_entries_round_fk
references drand_entries (round),
block text not null
constraint blocks_block_cids_cid_fk
references block_cids (cid)
);
create unique index if not exists block_drand_entries_round_uindex
on block_drand_entries (round, block);
create table if not exists blocks
(
cid text not null
constraint blocks_pk
primary key
constraint blocks_block_cids_cid_fk
references block_cids (cid),
parentWeight numeric not null,
parentStateRoot text not null,
height bigint not null,
miner text not null,
timestamp bigint not null,
ticket bytea not null,
eprof bytea,
forksig bigint not null
);
create unique index if not exists block_cid_uindex
on blocks (cid,height);
create materialized view if not exists state_heights
as select distinct height, parentstateroot from blocks;
create index if not exists state_heights_height_index
on state_heights (height);
create index if not exists state_heights_parentstateroot_index
on state_heights (parentstateroot);
`); err != nil {
return err
}
return tx.Commit()
}
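// Start stores all headers reachable from the current chain head and then follows
// ChainNotify to keep the block header tables up to date as new tipsets are applied.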
func (s *Syncer) Start(ctx context.Context) {
log.Debug("Starting Syncer")
if err := s.setupSchemas(); err != nil {
log.Fatal(err)
}
// doing the initial sync here lets us avoid the HCCurrent case in the switch
head, err := s.node.ChainHead(ctx)
if err != nil {
log.Fatalw("Failed to get chain head form lotus", "error", err)
}
unsynced, err := s.unsyncedBlocks(ctx, head, time.Unix(0, 0))
if err != nil {
log.Fatalw("failed to gather unsynced blocks", "error", err)
}
if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
log.Fatalw("failed to store unsynced blocks", "error", err)
}
// continue to keep the block headers table up to date.
notifs, err := s.node.ChainNotify(ctx)
if err != nil {
log.Fatal(err)
}
lastSynced := time.Now()
go func() {
for notif := range notifs {
for _, change := range notif {
switch change.Type {
case store.HCApply:
unsynced, err := s.unsyncedBlocks(ctx, change.Val, lastSynced)
if err != nil {
log.Errorw("failed to gather unsynced blocks", "error", err)
}
if len(unsynced) == 0 {
continue
}
if err := s.storeHeaders(unsynced, true, lastSynced); err != nil {
// This is pretty bad and needs some kind of retry mechanism.
// For now just log an error; the blocks will be attempted again on the next notification.
log.Errorw("failed to store unsynced blocks", "error", err)
}
lastSynced = time.Now()
case store.HCRevert:
log.Debug("revert todo")
}
}
}
}()
}
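// unsyncedBlocks walks the chain backwards from head and returns every block header that
// has not been stored since the given time.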
func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since time.Time) (map[cid.Cid]*types.BlockHeader, error) {
// get a list of blocks we have already synced in the past 3 mins. This ensures we aren't returning the entire
// table every time.
lookback := since.Add(-(time.Minute * 3))
log.Debugw("Gathering unsynced blocks", "since", lookback.String())
hasList, err := s.syncedBlocks(lookback)
if err != nil {
return nil, err
}
// build a list of blocks that we have not synced.
toVisit := list.New()
for _, header := range head.Blocks() {
toVisit.PushBack(header)
}
toSync := map[cid.Cid]*types.BlockHeader{}
for toVisit.Len() > 0 {
bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)
_, has := hasList[bh.Cid()]
if _, seen := toSync[bh.Cid()]; seen || has {
continue
}
toSync[bh.Cid()] = bh
if len(toSync)%500 == 10 {
log.Debugw("To visit", "toVisit", toVisit.Len(), "toSync", len(toSync), "current_height", bh.Height)
}
if len(bh.Parents) == 0 {
continue
}
pts, err := s.node.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
if err != nil {
log.Error(err)
continue
}
for _, header := range pts.Blocks() {
toVisit.PushBack(header)
}
}
log.Debugw("Gathered unsynced blocks", "count", len(toSync))
return toSync, nil
}
func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error) {
// timestamp limits the result to blocks synced after the given time, so we don't scan the whole table.
rws, err := s.db.Query(`select cid FROM blocks_synced where synced_at > $1`, timestamp.Unix())
if err != nil {
return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
}
out := map[cid.Cid]struct{}{}
for rws.Next() {
var c string
if err := rws.Scan(&c); err != nil {
return nil, xerrors.Errorf("Failed to scan blocks_synced: %w", err)
}
ci, err := cid.Parse(c)
if err != nil {
return nil, xerrors.Errorf("Failed to parse blocks_synced: %w", err)
}
out[ci] = struct{}{}
}
return out, nil
}
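// storeHeaders writes block headers, along with their CIDs, parents and drand entries, in a
// single transaction, staging rows in temp tables so the inserts are idempotent. When sync is
// true the blocks are also recorded in blocks_synced with the given timestamp.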
func (s *Syncer) storeHeaders(bhs map[cid.Cid]*types.BlockHeader, sync bool, timestamp time.Time) error {
s.headerLk.Lock()
defer s.headerLk.Unlock()
if len(bhs) == 0 {
return nil
}
log.Debugw("Storing Headers", "count", len(bhs))
tx, err := s.db.Begin()
if err != nil {
return xerrors.Errorf("begin: %w", err)
}
if _, err := tx.Exec(`
create temp table bc (like block_cids excluding constraints) on commit drop;
create temp table de (like drand_entries excluding constraints) on commit drop;
create temp table bde (like block_drand_entries excluding constraints) on commit drop;
create temp table tbp (like block_parents excluding constraints) on commit drop;
create temp table bs (like blocks_synced excluding constraints) on commit drop;
create temp table b (like blocks excluding constraints) on commit drop;
`); err != nil {
return xerrors.Errorf("prep temp: %w", err)
}
{
stmt, err := tx.Prepare(`copy bc (cid) from STDIN`)
if err != nil {
return err
}
for _, bh := range bhs {
if _, err := stmt.Exec(bh.Cid().String()); err != nil {
log.Error(err)
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into block_cids select * from bc on conflict do nothing `); err != nil {
return xerrors.Errorf("drand entries put: %w", err)
}
}
{
stmt, err := tx.Prepare(`copy de (round, data) from STDIN`)
if err != nil {
return err
}
for _, bh := range bhs {
for _, ent := range bh.BeaconEntries {
if _, err := stmt.Exec(ent.Round, ent.Data); err != nil {
log.Error(err)
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into drand_entries select * from de on conflict do nothing `); err != nil {
return xerrors.Errorf("drand entries put: %w", err)
}
}
{
stmt, err := tx.Prepare(`copy bde (round, block) from STDIN`)
if err != nil {
return err
}
for _, bh := range bhs {
for _, ent := range bh.BeaconEntries {
if _, err := stmt.Exec(ent.Round, bh.Cid().String()); err != nil {
log.Error(err)
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into block_drand_entries select * from bde on conflict do nothing `); err != nil {
return xerrors.Errorf("block drand entries put: %w", err)
}
}
{
stmt, err := tx.Prepare(`copy tbp (block, parent) from STDIN`)
if err != nil {
return err
}
for _, bh := range bhs {
for _, parent := range bh.Parents {
if _, err := stmt.Exec(bh.Cid().String(), parent.String()); err != nil {
log.Error(err)
}
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into block_parents select * from tbp on conflict do nothing `); err != nil {
return xerrors.Errorf("parent put: %w", err)
}
}
if sync {
stmt, err := tx.Prepare(`copy bs (cid, synced_at) from stdin `)
if err != nil {
return err
}
for _, bh := range bhs {
if _, err := stmt.Exec(bh.Cid().String(), timestamp.Unix()); err != nil {
log.Error(err)
}
}
if err := stmt.Close(); err != nil {
return err
}
if _, err := tx.Exec(`insert into blocks_synced select * from bs on conflict do nothing `); err != nil {
return xerrors.Errorf("syncd put: %w", err)
}
}
stmt2, err := tx.Prepare(`copy b (cid, parentWeight, parentStateRoot, height, miner, "timestamp", ticket, eprof, forksig) from stdin`)
if err != nil {
return err
}
for _, bh := range bhs {
var eprof interface{}
if bh.ElectionProof != nil {
eprof = bh.ElectionProof.VRFProof
}
if bh.Ticket == nil {
log.Warnf("got a block with nil ticket")
bh.Ticket = &types.Ticket{
VRFProof: []byte{},
}
}
if _, err := stmt2.Exec(
bh.Cid().String(),
bh.ParentWeight.String(),
bh.ParentStateRoot.String(),
bh.Height,
bh.Miner.String(),
bh.Timestamp,
bh.Ticket.VRFProof,
eprof,
bh.ForkSignaling); err != nil {
log.Error(err)
}
}
if err := stmt2.Close(); err != nil {
return xerrors.Errorf("s2 close: %w", err)
}
if _, err := tx.Exec(`insert into blocks select * from b on conflict do nothing `); err != nil {
return xerrors.Errorf("blk put: %w", err)
}
return tx.Commit()
}

View File

@ -1,350 +0,0 @@
package main
import (
"fmt"
"html/template"
"net/http"
"os"
"path/filepath"
"strconv"
rice "github.com/GeertJohan/go.rice"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
type handler struct {
api api.FullNode
st *storage
site *rice.Box
assets http.Handler
templates map[string]*template.Template
}
func newHandler(api api.FullNode, st *storage) (*handler, error) {
h := &handler{
api: api,
st: st,
site: rice.MustFindBox("site"),
templates: map[string]*template.Template{},
}
h.assets = http.FileServer(h.site.HTTPBox())
funcs := template.FuncMap{
"count": h.count,
"countCol": h.countCol,
"sum": h.sum,
"netPower": h.netPower,
"queryNum": h.queryNum,
"sizeStr": sizeStr,
"strings": h.strings,
"qstr": h.qstr,
"qstrs": h.qstrs,
"messages": h.messages,
"pageDown": pageDown,
"parseInt": func(s string) (int, error) { i, e := strconv.ParseInt(s, 10, 64); return int(i), e },
"substr": func(i, j int, s string) string { return s[i:j] },
"sub": func(a, b int) int { return a - b }, // TODO: really not builtin?
"param": func(string) string { return "" }, // replaced in request handler
}
base := template.New("")
base.Funcs(funcs)
return h, h.site.Walk("", func(path string, info os.FileInfo, err error) error {
if filepath.Ext(path) != ".html" {
return nil
}
if err != nil {
return err
}
log.Info(path)
h.templates["/"+path], err = base.New(path).Parse(h.site.MustString(path))
return err
})
}
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h, err := newHandler(h.api, h.st) // for faster dev
if err != nil {
log.Error(err)
return
}
p := r.URL.Path
if p == "/" {
p = "/index.html"
}
t, ok := h.templates[p]
if !ok {
h.assets.ServeHTTP(w, r)
return
}
t, err = t.Clone()
if err != nil {
log.Error(err)
return
}
t.Funcs(map[string]interface{}{
"param": r.FormValue,
})
if err := t.Execute(w, nil); err != nil {
log.Errorf("%+v", err)
return
}
log.Info(r.URL.Path)
}
// Template funcs
func (h *handler) count(table string, filters ...string) (int, error) {
// explicitly not caring about sql injection too much, this doesn't take user input
filts := ""
if len(filters) > 0 {
filts = " where "
for _, filter := range filters {
filts += filter + " and "
}
filts = filts[:len(filts)-5]
}
var c int
err := h.st.db.QueryRow("select count(1) from " + table + filts).Scan(&c)
if err != nil {
return 0, err
}
return c, nil
}
func (h *handler) countCol(table string, col string, filters ...string) (int, error) {
// explicitly not caring about sql injection too much, this doesn't take user input
filts := ""
if len(filters) > 0 {
filts = " where "
for _, filter := range filters {
filts += filter + " and "
}
filts = filts[:len(filts)-5]
}
var c int
err := h.st.db.QueryRow("select count(distinct " + col + ") from " + table + filts).Scan(&c)
if err != nil {
return 0, err
}
return c, nil
}
func (h *handler) sum(table string, col string) (types.BigInt, error) {
return h.queryNum("select sum(cast(" + col + " as bigint)) from " + table)
}
func (h *handler) netPower(slashFilt string) (types.BigInt, error) {
if slashFilt != "" {
slashFilt = " where " + slashFilt
}
return h.queryNum(`select sum(power) from (select distinct on (addr) power, slashed_at from miner_heads
inner join blocks b on miner_heads.stateroot = b.parentStateRoot
order by addr, height desc) as p` + slashFilt)
}
func (h *handler) queryNum(q string, p ...interface{}) (types.BigInt, error) {
// explicitly not caring about sql injection too much, this doesn't take user input
var c string
err := h.st.db.QueryRow(q, p...).Scan(&c)
if err != nil {
log.Error("qnum ", q, p, err)
return types.NewInt(0), err
}
i := types.NewInt(0)
_, ok := i.SetString(c, 10)
if !ok {
return types.NewInt(0), xerrors.New("num parse error: " + c)
}
return i, nil
}
var units = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"}
func sizeStr(size types.BigInt) string {
size = types.BigMul(size, types.NewInt(100))
i := 0
for types.BigCmp(size, types.NewInt(102400)) >= 0 && i < len(units)-1 {
size = types.BigDiv(size, types.NewInt(1024))
i++
}
return fmt.Sprintf("%s.%s %s", types.BigDiv(size, types.NewInt(100)), types.BigMod(size, types.NewInt(100)), units[i])
}
func (h *handler) strings(table string, col string, filter string, args ...interface{}) (out []string, err error) {
if len(filter) > 0 {
filter = " where " + filter
}
log.Info("strings qstr ", "select "+col+" from "+table+filter, args)
rws, err := h.st.db.Query("select "+col+" from "+table+filter, args...)
if err != nil {
return nil, err
}
for rws.Next() {
var r string
if err := rws.Scan(&r); err != nil {
return nil, err
}
out = append(out, r)
}
return
}
func (h *handler) qstr(q string, p ...interface{}) (string, error) {
// explicitly not caring about sql injection too much, this doesn't take user input
r, err := h.qstrs(q, 1, p...)
if err != nil {
return "", err
}
return r[0], nil
}
func (h *handler) qstrs(q string, n int, p ...interface{}) ([]string, error) {
// explicitly not caring about sql injection too much, this doesn't take user input
c := make([]string, n)
ia := make([]interface{}, n)
for i := range c {
ia[i] = &c[i]
}
err := h.st.db.QueryRow(q, p...).Scan(ia...)
if err != nil {
log.Error("qnum ", q, p, err)
return nil, err
}
return c, nil
}
type sbig types.BigInt
func (bi *sbig) Scan(value interface{}) error {
switch value := value.(type) {
case string:
i, ok := big.NewInt(0).SetString(value, 10)
if !ok {
if value == "<nil>" {
return nil
}
return xerrors.Errorf("failed to parse bigint string: '%s'", value)
}
bi.Int = i
return nil
case int64:
bi.Int = big.NewInt(value).Int
return nil
default:
return xerrors.Errorf("non-string types unsupported: %T", value)
}
}
type Message struct {
To address.Address
From address.Address
Nonce uint64
Value sbig
GasPrice sbig
GasLimit int64
Method abi.MethodNum
Params []byte
}
func (h *handler) messages(filter string, args ...interface{}) (out []types.Message, err error) {
if len(filter) > 0 {
filter = " where " + filter
}
log.Info("select * from messages " + filter)
rws, err := h.st.db.Query("select * from messages "+filter, args...)
if err != nil {
return nil, err
}
for rws.Next() {
var r Message
var cs string
if err := rws.Scan(
&cs,
&r.From,
&r.To,
&r.Nonce,
&r.Value,
&r.GasPrice,
&r.GasLimit,
&r.Method,
&r.Params,
); err != nil {
return nil, err
}
c, err := cid.Parse(cs)
if err != nil {
return nil, err
}
tr := types.Message{
To: r.To,
From: r.From,
Nonce: r.Nonce,
Value: types.BigInt(r.Value),
GasPrice: types.BigInt(r.GasPrice),
GasLimit: r.GasLimit,
Method: r.Method,
Params: r.Params,
}
if c != tr.Cid() {
log.Warn("msg cid doesn't match")
}
out = append(out, tr)
}
return
}
func pageDown(base, n int) []int {
out := make([]int, n)
for i := range out {
out[i] = base - i
}
return out
}
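As a quick illustration of the helper above: pageDown(100, 5) returns [100 99 98 97 96], i.e. n consecutive values counting down from base.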
var _ http.Handler = &handler{}

View File

@ -0,0 +1,51 @@
package util
import (
"bytes"
"context"
"fmt"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api"
)
// TODO extract this to a common location in lotus and reuse the code
// APIIpldStore is required for AMT and HAMT access.
type APIIpldStore struct {
ctx context.Context
api api.FullNode
}
func NewAPIIpldStore(ctx context.Context, api api.FullNode) *APIIpldStore {
return &APIIpldStore{
ctx: ctx,
api: api,
}
}
func (ht *APIIpldStore) Context() context.Context {
return ht.ctx
}
func (ht *APIIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
raw, err := ht.api.ChainReadObj(ctx, c)
if err != nil {
return err
}
cu, ok := out.(cbg.CBORUnmarshaler)
if ok {
if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
return err
}
return nil
}
return fmt.Errorf("Object does not implement CBORUnmarshaler: %T", out)
}
func (ht *APIIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
return cid.Undef, fmt.Errorf("Put is not implemented on APIIpldStore")
}
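A minimal usage sketch for the store above; the variable names (fullNode, minerActor) are illustrative and not part of this change:
store := util.NewAPIIpldStore(ctx, fullNode)   // fullNode is an api.FullNode client
var st miner.State                             // any cbor-gen type implementing cbg.CBORUnmarshaler works
if err := store.Get(ctx, minerActor.Head, &st); err != nil { // minerActor obtained via StateGetActor
return err
}
// st now holds the miner actor state, fetched over the API via ChainReadObj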

View File

@ -1,8 +1,13 @@
package main
import (
"encoding/csv"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
@ -10,10 +15,12 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/lotus/build"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/genesis"
)
@ -23,6 +30,7 @@ var genesisCmd = &cli.Command{
Subcommands: []*cli.Command{
genesisNewCmd,
genesisAddMinerCmd,
genesisAddMsigsCmd,
},
}
@ -141,3 +149,153 @@ var genesisAddMinerCmd = &cli.Command{
return nil
},
}
type GenAccountEntry struct {
Version int
ID string
Amount types.FIL
VestingMonths int
CustodianID int
M int
N int
Addresses []address.Address
Type string
Sig1 string
Sig2 string
}
var genesisAddMsigsCmd = &cli.Command{
Name: "add-msigs",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() < 2 {
return fmt.Errorf("must specify template file and csv file with accounts")
}
genf, err := homedir.Expand(cctx.Args().First())
if err != nil {
return err
}
csvf, err := homedir.Expand(cctx.Args().Get(1))
if err != nil {
return err
}
var template genesis.Template
b, err := ioutil.ReadFile(genf)
if err != nil {
return xerrors.Errorf("read genesis template: %w", err)
}
if err := json.Unmarshal(b, &template); err != nil {
return xerrors.Errorf("unmarshal genesis template: %w", err)
}
entries, err := parseMultisigCsv(csvf)
if err != nil {
return xerrors.Errorf("parsing multisig csv file: %w", err)
}
for i, e := range entries {
if len(e.Addresses) != e.N {
return fmt.Errorf("entry %d had mismatch between 'N' and number of addresses", i)
}
msig := &genesis.MultisigMeta{
Signers: e.Addresses,
Threshold: e.M,
VestingDuration: monthsToBlocks(e.VestingMonths),
VestingStart: 0,
}
act := genesis.Actor{
Type: genesis.TMultisig,
Balance: abi.TokenAmount(e.Amount),
Meta: msig.ActorMeta(),
}
template.Accounts = append(template.Accounts, act)
}
b, err = json.MarshalIndent(&template, "", " ")
if err != nil {
return err
}
if err := ioutil.WriteFile(genf, b, 0644); err != nil {
return err
}
return nil
},
}
func monthsToBlocks(nmonths int) int {
days := uint64((365 * nmonths) / 12)
return int(days * 24 * 60 * 60 / build.BlockDelaySecs)
}
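A worked example of the conversion above, assuming build.BlockDelaySecs == 25 (the value is network-dependent):
// monthsToBlocks(6):
//   days   = 365*6/12          = 182
//   blocks = 182*24*60*60 / 25 = 628992 epochs of vesting duration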
func parseMultisigCsv(csvf string) ([]GenAccountEntry, error) {
fileReader, err := os.Open(csvf)
if err != nil {
return nil, xerrors.Errorf("read multisig csv: %w", err)
}
r := csv.NewReader(fileReader)
records, err := r.ReadAll()
if err != nil {
return nil, xerrors.Errorf("read multisig csv: %w", err)
}
var entries []GenAccountEntry
for i, e := range records[1:] {
var addrs []address.Address
addrStrs := strings.Split(strings.TrimSpace(e[7]), ":")
for j, a := range addrStrs {
addr, err := address.NewFromString(a)
if err != nil {
return nil, xerrors.Errorf("failed to parse address %d in row %d (%q): %w", j, i, a, err)
}
addrs = append(addrs, addr)
}
balance, err := types.ParseFIL(strings.TrimSpace(e[2]))
if err != nil {
return nil, xerrors.Errorf("failed to parse account balance: %w", err)
}
vesting, err := strconv.Atoi(strings.TrimSpace(e[3]))
if err != nil {
return nil, xerrors.Errorf("failed to parse vesting duration for record %d: %w", i, err)
}
custodianID, err := strconv.Atoi(strings.TrimSpace(e[4]))
if err != nil {
return nil, xerrors.Errorf("failed to parse custodianID in record %d: %w", i, err)
}
threshold, err := strconv.Atoi(strings.TrimSpace(e[5]))
if err != nil {
return nil, xerrors.Errorf("failed to parse multisigM in record %d: %w", i, err)
}
num, err := strconv.Atoi(strings.TrimSpace(e[6]))
if err != nil {
return nil, xerrors.Errorf("Number of addresses be integer: %w", err)
}
if e[0] != "1" {
return nil, xerrors.Errorf("record version must be 1")
}
entries = append(entries, GenAccountEntry{
Version: 1,
ID: e[1],
Amount: balance,
CustodianID: custodianID,
VestingMonths: vesting,
M: threshold,
N: num,
Type: e[8],
Sig1: e[9],
Sig2: e[10],
Addresses: addrs,
})
}
return entries, nil
}
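An illustrative input row in the shape parseMultisigCsv expects (one header line, which is skipped, then one entry per line; addresses colon-separated in the 8th column; all values below are placeholders):
Version,ID,Amount,VestingMonths,CustodianID,M,N,Addresses,Type,Sig1,Sig2
1,ID0001,1000,6,1,2,3,<addr1>:<addr2>:<addr3>,multisig,<sig1>,<sig2>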

View File

@ -106,16 +106,16 @@ var infoCmd = &cli.Command{
return xerrors.Errorf("counting faults: %w", err)
}
fmt.Printf("\tCommitted: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Sset), types.NewInt(uint64(mi.SectorSize)))))
fmt.Printf("\tCommitted: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Sectors), types.NewInt(uint64(mi.SectorSize)))))
if nfaults == 0 {
fmt.Printf("\tProving: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Pset), types.NewInt(uint64(mi.SectorSize)))))
fmt.Printf("\tProving: %s\n", types.SizeStr(types.BigMul(types.NewInt(secCounts.Active), types.NewInt(uint64(mi.SectorSize)))))
} else {
var faultyPercentage float64
if secCounts.Sset != 0 {
faultyPercentage = float64(10000*nfaults/secCounts.Sset) / 100.
if secCounts.Sectors != 0 {
faultyPercentage = float64(10000*nfaults/secCounts.Sectors) / 100.
}
fmt.Printf("\tProving: %s (%s Faulty, %.2f%%)\n",
types.SizeStr(types.BigMul(types.NewInt(secCounts.Pset), types.NewInt(uint64(mi.SectorSize)))),
types.SizeStr(types.BigMul(types.NewInt(secCounts.Sectors), types.NewInt(uint64(mi.SectorSize)))),
types.SizeStr(types.BigMul(types.NewInt(nfaults), types.NewInt(uint64(mi.SectorSize)))),
faultyPercentage)
}

View File

@ -7,20 +7,15 @@ import (
"text/tabwriter"
"time"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/lib/adtutil"
)
var provingCmd = &cli.Command{
@ -70,14 +65,7 @@ var provingFaultsCmd = &cli.Command{
return err
}
}
faults, err := mas.Faults.All(100000000000)
if err != nil {
return err
}
if len(faults) == 0 {
fmt.Println("no faulty sectors")
return nil
}
head, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
@ -87,16 +75,23 @@ var provingFaultsCmd = &cli.Command{
return xerrors.Errorf("getting miner deadlines: %w", err)
}
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tsectors")
for deadline, sectors := range deadlines.Due {
intersectSectors, _ := bitfield.IntersectBitField(sectors, mas.Faults)
if intersectSectors != nil {
allSectors, _ := intersectSectors.All(100000000000)
for _, num := range allSectors {
_, _ = fmt.Fprintf(tw, "%d\t%d\n", deadline, num)
}
_, _ = fmt.Fprintln(tw, "deadline\tpartition\tsectors")
for dlIdx := range deadlines {
partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("loading partitions for deadline %d: %w", dlIdx, err)
}
for partIdx, partition := range partitions {
faulty, err := partition.Faults.All(10000000)
if err != nil {
return err
}
for _, num := range faulty {
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\n", dlIdx, partIdx, num)
}
}
}
return tw.Flush()
},
@ -155,55 +150,68 @@ var provingInfoCmd = &cli.Command{
}
}
newSectors, err := mas.NewSectors.Count()
if err != nil {
return err
}
faults, err := mas.Faults.Count()
if err != nil {
return err
}
recoveries, err := mas.Recoveries.Count()
if err != nil {
return err
}
var provenSectors uint64
for _, d := range deadlines.Due {
c, err := d.Count()
parts := map[uint64][]*miner.Partition{}
for dlIdx := range deadlines {
part, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return err
return xerrors.Errorf("getting miner partition: %w", err)
}
parts[uint64(dlIdx)] = part
}
proving := uint64(0)
faults := uint64(0)
recovering := uint64(0)
for _, partitions := range parts {
for _, partition := range partitions {
sc, err := partition.Sectors.Count()
if err != nil {
return xerrors.Errorf("count partition sectors: %w", err)
}
proving += sc
fc, err := partition.Faults.Count()
if err != nil {
return xerrors.Errorf("count partition faults: %w", err)
}
faults += fc
rc, err := partition.Recoveries.Count()
if err != nil {
return xerrors.Errorf("count partition recoveries: %w", err)
}
recovering += rc
}
provenSectors += c
}
var faultPerc float64
if provenSectors > 0 {
faultPerc = float64(faults*10000/provenSectors) / 100
if proving > 0 {
faultPerc = float64(faults*10000/proving) / 100
}
fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)
fmt.Printf("Chain Period: %d\n", cd.CurrentEpoch/miner.WPoStProvingPeriod)
fmt.Printf("Chain Period Start: %s\n", epochTime(cd.CurrentEpoch, (cd.CurrentEpoch/miner.WPoStProvingPeriod)*miner.WPoStProvingPeriod))
fmt.Printf("Chain Period End: %s\n\n", epochTime(cd.CurrentEpoch, (cd.CurrentEpoch/miner.WPoStProvingPeriod+1)*miner.WPoStProvingPeriod))
fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%miner.WPoStProvingPeriod)
fmt.Printf("Proving Period Start: %s\n", epochTime(cd.CurrentEpoch, cd.PeriodStart))
fmt.Printf("Next Period Start: %s\n\n", epochTime(cd.CurrentEpoch, cd.PeriodStart+miner.WPoStProvingPeriod))
fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
fmt.Printf("Recovering: %d\n", recoveries)
fmt.Printf("New Sectors: %d\n\n", newSectors)
fmt.Printf("Recovering: %d\n", recovering)
fmt.Printf("Deadline Index: %d\n", cd.Index)
if cd.Index < uint64(len(deadlines.Due)) {
curDeadlineSectors, err := deadlines.Due[cd.Index].Count()
if err != nil {
return xerrors.Errorf("counting deadline sectors: %w", err)
if cd.Index < miner.WPoStPeriodDeadlines {
curDeadlineSectors := uint64(0)
for _, partition := range parts[cd.Index] {
sc, err := partition.Sectors.Count()
if err != nil {
return xerrors.Errorf("counting current deadline sectors: %w", err)
}
curDeadlineSectors += sc
}
fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
}
@ -262,7 +270,6 @@ var provingDeadlinesCmd = &cli.Command{
}
var mas miner.State
var info *miner.MinerInfo
{
mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
@ -275,64 +282,27 @@ var provingDeadlinesCmd = &cli.Command{
if err := mas.UnmarshalCBOR(bytes.NewReader(rmas)); err != nil {
return err
}
info, err = mas.GetInfo(adtutil.NewStore(ctx, cbor.NewCborStore(apibstore.NewAPIBlockstore(api))))
if err != nil {
return err
}
}
tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintln(tw, "deadline\tsectors\tpartitions\tproven")
_, _ = fmt.Fprintln(tw, "deadline\tpartitions\tsectors\tproven")
for i, field := range deadlines.Due {
c, err := field.Count()
for dlIdx, deadline := range deadlines {
partitions, err := api.StateMinerPartitions(ctx, maddr, uint64(dlIdx), types.EmptyTSK)
if err != nil {
return err
return xerrors.Errorf("getting partitions for deadline %d: %w", dlIdx, err)
}
firstPartition, sectorCount, err := miner.PartitionsForDeadline(deadlines, info.WindowPoStPartitionSectors, uint64(i))
provenPartitions, err := deadline.PostSubmissions.Count()
if err != nil {
return err
}
partitionCount := (sectorCount + info.WindowPoStPartitionSectors - 1) / info.WindowPoStPartitionSectors
var provenPartitions uint64
{
var maskRuns []rlepluslazy.Run
if firstPartition > 0 {
maskRuns = append(maskRuns, rlepluslazy.Run{
Val: false,
Len: firstPartition,
})
}
maskRuns = append(maskRuns, rlepluslazy.Run{
Val: true,
Len: partitionCount,
})
ppbm, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: maskRuns})
if err != nil {
return err
}
pp, err := bitfield.IntersectBitField(ppbm, mas.PostSubmissions)
if err != nil {
return err
}
provenPartitions, err = pp.Count()
if err != nil {
return err
}
}
var cur string
if di.Index == uint64(i) {
if di.Index == uint64(dlIdx) {
cur += "\t(current)"
}
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d\t%d%s\n", i, c, partitionCount, provenPartitions, cur)
_, _ = fmt.Fprintf(tw, "%d\t%d\t%d%s\n", dlIdx, len(partitions), provenPartitions, cur)
}
return tw.Flush()

View File

@ -138,20 +138,20 @@ var sectorsListCmd = &cli.Command{
return err
}
pset, err := fullApi.StateMinerProvingSet(ctx, maddr, types.EmptyTSK)
activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
provingIDs := make(map[abi.SectorNumber]struct{}, len(pset))
for _, info := range pset {
provingIDs[info.ID] = struct{}{}
activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
for _, info := range activeSet {
activeIDs[info.ID] = struct{}{}
}
sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, true, types.EmptyTSK)
if err != nil {
return err
}
commitedIDs := make(map[abi.SectorNumber]struct{}, len(pset))
commitedIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
for _, info := range sset {
commitedIDs[info.ID] = struct{}{}
}
@ -170,13 +170,13 @@ var sectorsListCmd = &cli.Command{
}
_, inSSet := commitedIDs[s]
_, inPSet := provingIDs[s]
_, inASet := activeIDs[s]
fmt.Fprintf(w, "%d: %s\tsSet: %s\tpSet: %s\ttktH: %d\tseedH: %d\tdeals: %v\n",
fmt.Fprintf(w, "%d: %s\tsSet: %s\tactive: %s\ttktH: %d\tseedH: %d\tdeals: %v\n",
s,
st.State,
yesno(inSSet),
yesno(inPSet),
yesno(inASet),
st.Ticket.Epoch,
st.Seed.Epoch,
st.Deals,

2
extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit 6a143e06f923f3a4f544c7a652e8b4df420a3d28
Subproject commit cddc56607e1d851ea6d09d49404bd7db70cb3c2e

View File

@ -51,7 +51,18 @@ func (am *AccountMeta) ActorMeta() json.RawMessage {
}
type MultisigMeta struct {
// TODO
Signers []address.Address
Threshold int
VestingDuration int
VestingStart int
}
func (mm *MultisigMeta) ActorMeta() json.RawMessage {
out, err := json.Marshal(mm)
if err != nil {
panic(err)
}
return out
}
type Actor struct {

21
go.mod
View File

@ -15,24 +15,24 @@ require (
github.com/drand/drand v1.0.3-0.20200714175734-29705eaf09d4
github.com/drand/kyber v1.1.1
github.com/fatih/color v1.8.0
github.com/filecoin-project/chain-validation v0.0.6-0.20200713115604-652494bba69e
github.com/filecoin-project/filecoin-ffi v0.26.1-0.20200508175440-05b30afeb00d
github.com/filecoin-project/chain-validation v0.0.6-0.20200717222607-ced2bcb96ec0
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef
github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2
github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
github.com/filecoin-project/go-data-transfer v0.4.1-0.20200715144713-b3311844e1a5
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
github.com/filecoin-project/go-fil-markets v0.4.1-0.20200715201050-c141144ea312
github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
github.com/filecoin-project/go-statestore v0.1.0
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
github.com/filecoin-project/sector-storage v0.0.0-20200712023225-1d67dcfa3c15
github.com/filecoin-project/specs-actors v0.7.2
github.com/filecoin-project/sector-storage v0.0.0-20200717213554-a109ef9cbeab
github.com/filecoin-project/specs-actors v0.7.3-0.20200717200758-365408676dbb
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
github.com/filecoin-project/storage-fsm v0.0.0-20200715191202-7e92e888bf41
github.com/filecoin-project/storage-fsm v0.0.0-20200717125541-d575c3a5f7f2
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
github.com/go-kit/kit v0.10.0
github.com/go-ole/go-ole v1.2.4 // indirect
@ -74,7 +74,7 @@ require (
github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.2.0
github.com/lib/pq v1.7.0
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.10.0
github.com/libp2p/go-libp2p-connmgr v0.2.4
@ -101,7 +101,7 @@ require (
github.com/multiformats/go-multiaddr-dns v0.2.0
github.com/multiformats/go-multiaddr-net v0.1.5
github.com/multiformats/go-multibase v0.0.3
github.com/multiformats/go-multihash v0.0.13
github.com/multiformats/go-multihash v0.0.14
github.com/opentracing/opentracing-go v1.1.0
github.com/raulk/clock v1.1.0
github.com/stretchr/objx v0.2.0 // indirect
@ -109,7 +109,7 @@ require (
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.2.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
@ -119,6 +119,7 @@ require (
go.uber.org/multierr v1.5.0
go.uber.org/zap v1.15.0
go4.org v0.0.0-20190313082347-94abd6928b1d // indirect
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
@ -129,3 +130,5 @@ require (
replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
replace github.com/dgraph-io/badger/v2 => github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794

45
go.sum
View File

@ -170,11 +170,12 @@ github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhY
github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg=
github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU=
github.com/dgraph-io/badger/v2 v2.0.3 h1:inzdf6VF/NZ+tJ8RwwYMjJMvsOALTHYdozn0qSl6XJI=
github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM=
github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794 h1:PIPH4SLjYXMMlX/cQqV7nIRatv7556yqUfWY+KBjrtQ=
github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20200716180832-3ab515320794/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
@ -215,8 +216,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
github.com/filecoin-project/chain-validation v0.0.6-0.20200713115604-652494bba69e h1:oCdk3QSDcHu3l6dWpZcHhaUVz5RQi1mXNyv8hgcK1zA=
github.com/filecoin-project/chain-validation v0.0.6-0.20200713115604-652494bba69e/go.mod h1:293UFGwKduXCuIC2/5pIepH7lof+L9fNiPku/+arST4=
github.com/filecoin-project/chain-validation v0.0.6-0.20200717222607-ced2bcb96ec0 h1:orVeRgvZ6D52O3tE+/9UyuXKFOFT6iE9x2ncFZAgegM=
github.com/filecoin-project/chain-validation v0.0.6-0.20200717222607-ced2bcb96ec0/go.mod h1:ikB0DDO3N4nF0Yn3aAgUqofggbLvUnITJKsrEzI1E08=
github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef h1:Wi5E+P1QfHP8IF27eUiTx5vYfqQZwfPxzq3oFEq8w8U=
@ -226,7 +227,6 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2
github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw=
github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60=
github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
github.com/filecoin-project/go-bitfield v0.0.3/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
github.com/filecoin-project/go-bitfield v0.0.4-0.20200703174658-f4a5758051a1 h1:xuHlrdznafh7ul5t4xEncnA4qgpQvJZEw+mr98eqHXw=
@ -237,8 +237,9 @@ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMX
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-data-transfer v0.4.1-0.20200715144713-b3311844e1a5 h1:/OZ+nr0x3uMZCPrreuUbS5EUOFm9DDo4ljgdav8rp/s=
github.com/filecoin-project/go-data-transfer v0.4.1-0.20200715144713-b3311844e1a5/go.mod h1:duGDSKvsOxiKl6Dueh8DNA6ZbiM30PWUWlSKjo9ac+o=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-fil-markets v0.4.1-0.20200715201050-c141144ea312 h1:oVZggNjDWZWEjomkxPl8U3jrOLURoS4QSZA6t4YU5BY=
github.com/filecoin-project/go-fil-markets v0.4.1-0.20200715201050-c141144ea312/go.mod h1:MvrpKOiETu39e9H167gdQzdzLNcvHsUp48UkXqPSdtU=
github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24 h1:Jc7vkplmZYVuaEcSXGHDwefvZIdoyyaoGDLqSr8Svms=
@ -259,19 +260,23 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/
github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM=
github.com/filecoin-project/sector-storage v0.0.0-20200712023225-1d67dcfa3c15 h1:miw6hiusb/MkV1ryoqUKKWnvHhPW00AYtyeCj0L8pqo=
github.com/filecoin-project/sector-storage v0.0.0-20200712023225-1d67dcfa3c15/go.mod h1:salgVdX7qeXFo/xaiEQE29J4pPkjn71T0kt0n+VDBzo=
github.com/filecoin-project/sector-storage v0.0.0-20200717213554-a109ef9cbeab h1:jEQtbWFyEKnCw3eAVCW3MSX/K7Nv03B3zzS/rfm2k+Q=
github.com/filecoin-project/sector-storage v0.0.0-20200717213554-a109ef9cbeab/go.mod h1:7EE+f7jM4kCy2MKHoiiwNDQGJSb+QQzZ+y+/17ugq4w=
github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.7.0/go.mod h1:+z0htZu/wLBDbOLcQTKKUEC2rkUTFzL2KJ/bRAVWkws=
github.com/filecoin-project/specs-actors v0.7.1/go.mod h1:+z0htZu/wLBDbOLcQTKKUEC2rkUTFzL2KJ/bRAVWkws=
github.com/filecoin-project/specs-actors v0.7.2 h1:zMOU6LJS3gz7E9rwPPiPgTVawe7ypUWK9ugPBiaDZGc=
github.com/filecoin-project/specs-actors v0.7.2/go.mod h1:oJMdZvXSDZWWvjDxCkAywNz2MhgxV6dBzSCVg4vNf4o=
github.com/filecoin-project/specs-actors v0.7.3-0.20200716231407-60a2ae96d2e6 h1:F+GcBdKPdW/wTv6bMJxG9Zj1dc0UGkO6uNOQmKP/g1o=
github.com/filecoin-project/specs-actors v0.7.3-0.20200716231407-60a2ae96d2e6/go.mod h1:JOMUa7EijvpOO4ofD1yeHNmqohkmmnhTvz/IpB6so4c=
github.com/filecoin-project/specs-actors v0.7.3-0.20200717200758-365408676dbb h1:wXbLPkV83vdbNihBa855QRM2zt+HCCGwIWYJxU9YEK8=
github.com/filecoin-project/specs-actors v0.7.3-0.20200717200758-365408676dbb/go.mod h1:JOMUa7EijvpOO4ofD1yeHNmqohkmmnhTvz/IpB6so4c=
github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94=
github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/filecoin-project/storage-fsm v0.0.0-20200715191202-7e92e888bf41 h1:K2DI5+IKuY0cOjX/r1Agy6rYcAhU89LVNOjutCUib4g=
github.com/filecoin-project/storage-fsm v0.0.0-20200715191202-7e92e888bf41/go.mod h1:TDNjb0HYG2fppxWH5EsiNCZu97iJZNuPYmivSK13Ao0=
github.com/filecoin-project/storage-fsm v0.0.0-20200717125541-d575c3a5f7f2 h1:A9zUXOMuVnSTp9a0i0KtHkB05hA8mRWVLls6Op9Czuo=
github.com/filecoin-project/storage-fsm v0.0.0-20200717125541-d575c3a5f7f2/go.mod h1:1CGbd11KkHuyWPT+xwwCol1zl/jnlpiKD2L4fzKxaiI=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as=
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
@ -648,6 +653,7 @@ github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
@ -695,8 +701,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU=
github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
@ -1090,8 +1096,9 @@ github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
@ -1113,6 +1120,7 @@ github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJE
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA=
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
@ -1319,7 +1327,9 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4=
github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
@ -1345,6 +1355,10 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0/go.mod h1:X
github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg=
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d h1:Y25auOnuZb/GuJvqMflRSDWBz8/HBRME8fiD+H8zLfs=
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg=
github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d h1:wSxKhvbN7kUoP0sfRS+w2tWr45qlU8409i94hHLOT8w=
github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I=
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
@ -1369,6 +1383,7 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:
github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8=
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=

View File

@ -7,7 +7,7 @@ import (
"io"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/ipfs/go-cid"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
@ -67,6 +67,8 @@ func (t *HelloMessage) MarshalCBOR(w io.Writer) error {
}
func (t *HelloMessage) UnmarshalCBOR(r io.Reader) error {
*t = HelloMessage{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -197,6 +199,8 @@ func (t *LatencyMessage) MarshalCBOR(w io.Writer) error {
}
func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) error {
*t = LatencyMessage{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)

View File

@ -296,7 +296,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes
}, nil
}
func (a *API) ClientRemoveImport(ctx context.Context, importID int64) error {
func (a *API) ClientRemoveImport(ctx context.Context, importID int) error {
return a.imgr().Remove(importID)
}

View File

@ -11,7 +11,6 @@ import (
"sync"
"github.com/filecoin-project/go-amt-ipld/v2"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/util/adt"
@ -206,7 +205,7 @@ func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid)
var collect = true
walker := func(ctx context.Context, c cid.Cid) ([]*ipld.Link, error) {
if c.Prefix().MhType == uint64(commcid.FC_SEALED_V1) || c.Prefix().MhType == uint64(commcid.FC_UNSEALED_V1) {
if c.Prefix().Codec == cid.FilCommitmentSealed || c.Prefix().Codec == cid.FilCommitmentUnsealed {
return []*ipld.Link{}, nil
}

View File

@ -2,6 +2,7 @@ package full
import (
"context"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
@ -135,6 +136,33 @@ func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to addr
return smsg.Cid(), nil
}
func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
enc, actErr := serializeSwapParams(oldAdd, newAdd)
if actErr != nil {
return cid.Undef, actErr
}
return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
enc, actErr := serializeSwapParams(oldAdd, newAdd)
if actErr != nil {
return cid.Undef, actErr
}
return a.MsigApprove(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
enc, actErr := serializeSwapParams(oldAdd, newAdd)
if actErr != nil {
return cid.Undef, actErr
}
return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}
func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
return a.msigApproveOrCancel(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params)
}
@ -223,3 +251,15 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro
return smsg.Cid(), nil
}
func serializeSwapParams(old address.Address, new address.Address) ([]byte, error) {
enc, actErr := actors.SerializeParams(&samsig.SwapSignerParams{
From: old,
To: new,
})
if actErr != nil {
return nil, actErr
}
return enc, nil
}

View File

@ -3,6 +3,7 @@ package full
import (
"bytes"
"context"
"errors"
"fmt"
"strconv"
@ -15,6 +16,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
@ -25,6 +27,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
@ -40,6 +43,8 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
var errBreakForeach = errors.New("break")
type StateAPI struct {
fx.In
@ -65,19 +70,42 @@ func (a *StateAPI) StateMinerSectors(ctx context.Context, addr address.Address,
return stmgr.GetMinerSectorSet(ctx, a.StateManager, ts, addr, filter, filterOut)
}
func (a *StateAPI) StateMinerProvingSet(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
var out []*api.ChainSectorInfo
err := a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(maddr,
a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
var allActive []*abi.BitField
err := a.StateManager.WithDeadlines(
a.StateManager.WithEachDeadline(
a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
active, err := partition.ActiveSectors()
if err != nil {
return xerrors.Errorf("partition.ActiveSectors: %w", err)
}
allActive = append(allActive, active)
return nil
})))(store, mas)
if err != nil {
return xerrors.Errorf("with deadlines: %w", err)
}
active, err := bitfield.MultiMerge(allActive...)
if err != nil {
return xerrors.Errorf("merging active sector bitfields: %w", err)
}
out, err = stmgr.LoadSectorsFromSet(ctx, a.Chain.Blockstore(), mas.Sectors, active, false)
return err
})))
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
return nil, xerrors.Errorf("getting active sectors from partitions: %w", err)
}
var mas miner.State
_, err = a.StateManager.LoadActorState(ctx, addr, &mas, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return stmgr.GetProvingSetRaw(ctx, a.StateManager, mas)
return out, nil
}
func (a *StateAPI) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) {
@ -93,12 +121,30 @@ func (a *StateAPI) StateMinerInfo(ctx context.Context, actor address.Address, ts
return api.NewApiMinerInfo(mi), nil
}
func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) (*miner.Deadlines, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
return stmgr.GetMinerDeadlines(ctx, a.StateManager, ts, m)
func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) ([]*miner.Deadline, error) {
var out []*miner.Deadline
return out, a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(m,
a.StateManager.WithActorState(ctx,
a.StateManager.WithDeadlines(
a.StateManager.WithEachDeadline(
func(store adt.Store, idx uint64, deadline *miner.Deadline) error {
out = append(out, deadline)
return nil
})))))
}
func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]*miner.Partition, error) {
var out []*miner.Partition
return out, a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(m,
a.StateManager.WithActorState(ctx,
a.StateManager.WithDeadlines(
a.StateManager.WithDeadline(dlIdx,
a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
out = append(out, partition)
return nil
}))))))
}
func (a *StateAPI) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
@ -113,19 +159,32 @@ func (a *StateAPI) StateMinerProvingDeadline(ctx context.Context, addr address.A
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
return miner.ComputeProvingPeriodDeadline(mas.ProvingPeriodStart, ts.Height()), nil
return mas.DeadlineInfo(ts.Height()).NextNotElapsed(), nil
}
func (a *StateAPI) StateMinerFaults(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.BitField, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
out := abi.NewBitField()
err := a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(addr,
a.StateManager.WithActorState(ctx,
a.StateManager.WithDeadlines(
a.StateManager.WithEachDeadline(
a.StateManager.WithEachPartition(func(store adt.Store, idx uint64, partition *miner.Partition) (err error) {
out, err = bitfield.MergeBitFields(out, partition.Faults)
return err
}))))))
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
return nil, err
}
return stmgr.GetMinerFaults(ctx, a.StateManager, ts, addr)
return out, err
}
func (a *StateAPI) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) {
endTs, err := a.Chain.GetTipSetFromKey(endTsk)
return nil, xerrors.Errorf("fixme")
/*endTs, err := a.Chain.GetTipSetFromKey(endTsk)
if err != nil {
return nil, xerrors.Errorf("loading end tipset %s: %w", endTsk, err)
}
@ -162,15 +221,26 @@ func (a *StateAPI) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEp
}
}
return allFaults, nil
return allFaults, nil*/
}
func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.BitField, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
out := abi.NewBitField()
err := a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(addr,
a.StateManager.WithActorState(ctx,
a.StateManager.WithDeadlines(
a.StateManager.WithEachDeadline(
a.StateManager.WithEachPartition(func(store adt.Store, idx uint64, partition *miner.Partition) (err error) {
out, err = bitfield.MergeBitFields(out, partition.Recoveries)
return err
}))))))
if err != nil {
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
return nil, err
}
return stmgr.GetMinerRecoveries(ctx, a.StateManager, ts, addr)
return out, err
}
func (a *StateAPI) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
@ -598,11 +668,51 @@ func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.
}
func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
var out api.MinerSectors
err := a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(addr,
a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
var allActive []*abi.BitField
err := a.StateManager.WithDeadlines(
a.StateManager.WithEachDeadline(
a.StateManager.WithEachPartition(func(store adt.Store, partIdx uint64, partition *miner.Partition) error {
active, err := partition.ActiveSectors()
if err != nil {
return xerrors.Errorf("partition.ActiveSectors: %w", err)
}
allActive = append(allActive, active)
return nil
})))(store, mas)
if err != nil {
return xerrors.Errorf("with deadlines: %w", err)
}
active, err := bitfield.MultiMerge(allActive...)
if err != nil {
return xerrors.Errorf("merging active sector bitfields: %w", err)
}
out.Active, err = active.Count()
if err != nil {
return xerrors.Errorf("counting active sectors: %w", err)
}
sarr, err := adt.AsArray(store, mas.Sectors)
if err != nil {
return err
}
out.Sectors = sarr.Length()
return nil
})))
if err != nil {
return api.MinerSectors{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
return api.MinerSectors{}, err
}
return stmgr.SectorSetSizes(ctx, a.StateManager, addr, ts)
return out, nil
}
func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
@ -621,6 +731,103 @@ func (a *StateAPI) StateSectorGetInfo(ctx context.Context, maddr address.Address
return stmgr.MinerSectorInfo(ctx, a.StateManager, maddr, n, ts)
}
type sectorPartitionCb func(store adt.Store, mas *miner.State, di uint64, pi uint64, part *miner.Partition) error
func (a *StateAPI) sectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey, cb sectorPartitionCb) error {
return a.StateManager.WithParentStateTsk(tsk,
a.StateManager.WithActor(maddr,
a.StateManager.WithActorState(ctx, func(store adt.Store, mas *miner.State) error {
return a.StateManager.WithDeadlines(func(store adt.Store, deadlines *miner.Deadlines) error {
err := a.StateManager.WithEachDeadline(func(store adt.Store, di uint64, deadline *miner.Deadline) error {
return a.StateManager.WithEachPartition(func(store adt.Store, pi uint64, partition *miner.Partition) error {
set, err := partition.Sectors.IsSet(uint64(sectorNumber))
if err != nil {
return xerrors.Errorf("is set: %w", err)
}
if set {
if err := cb(store, mas, di, pi, partition); err != nil {
return err
}
return errBreakForeach
}
return nil
})(store, di, deadline)
})(store, deadlines)
if err == errBreakForeach {
err = nil
}
return err
})(store, mas)
})))
}
func (a *StateAPI) StateSectorExpiration(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*api.SectorExpiration, error) {
var onTimeEpoch, earlyEpoch abi.ChainEpoch
err := a.sectorPartition(ctx, maddr, sectorNumber, tsk, func(store adt.Store, mas *miner.State, di uint64, pi uint64, part *miner.Partition) error {
quant := mas.QuantEndOfDeadline()
expirations, err := miner.LoadExpirationQueue(store, part.ExpirationsEpochs, quant)
if err != nil {
return xerrors.Errorf("loading expiration queue: %w", err)
}
var eset miner.ExpirationSet
return expirations.Array.ForEach(&eset, func(epoch int64) error {
set, err := eset.OnTimeSectors.IsSet(uint64(sectorNumber))
if err != nil {
return xerrors.Errorf("checking if sector is in onTime set: %w", err)
}
if set {
onTimeEpoch = abi.ChainEpoch(epoch)
}
set, err = eset.EarlySectors.IsSet(uint64(sectorNumber))
if err != nil {
return xerrors.Errorf("checking if sector is in early set: %w", err)
}
if set {
earlyEpoch = abi.ChainEpoch(epoch)
}
return nil
})
})
if err != nil {
return nil, err
}
if onTimeEpoch == 0 {
return nil, xerrors.Errorf("expiration for sector %d not found", sectorNumber)
}
return &api.SectorExpiration{
OnTime: onTimeEpoch,
Early: earlyEpoch,
}, nil
}
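A minimal call sketch for the new endpoint, with assumed variable names:
exp, err := fullNode.StateSectorExpiration(ctx, maddr, abi.SectorNumber(1), types.EmptyTSK)
if err != nil {
return err
}
// exp.OnTime is the scheduled expiration epoch; exp.Early is non-zero only when the
// sector has been rescheduled into the early-expiration queue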
func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*api.SectorLocation, error) {
var found *api.SectorLocation
err := a.sectorPartition(ctx, maddr, sectorNumber, tsk, func(store adt.Store, mas *miner.State, di, pi uint64, partition *miner.Partition) error {
found = &api.SectorLocation{
Deadline: di,
Partition: pi,
}
return errBreakForeach
})
if err != nil {
return nil, err
}
if found == nil {
return nil, xerrors.Errorf("sector %d not found in any partition", sectorNumber)
}
return found, nil
}
func (a *StateAPI) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) {
ts, err := a.Chain.GetTipSetFromKey(tsk)
if err != nil {
@ -729,39 +936,27 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
as := store.ActorStore(ctx, a.Chain.Blockstore())
var minerState miner.State
{
act, err := a.StateManager.GetActor(maddr, ts)
if err != nil {
return types.EmptyInt, err
}
if err := as.Get(ctx, act.Head, &minerState); err != nil {
return types.EmptyInt, err
}
}
var powerState power.State
{
act, err := a.StateManager.GetActor(builtin.StoragePowerActorAddr, ts)
if err != nil {
return types.EmptyInt, err
}
if err := as.Get(ctx, act.Head, &powerState); err != nil {
return types.EmptyInt, err
}
}
var rewardState reward.State
{
act, err := a.StateManager.GetActor(builtin.RewardActorAddr, ts)
if err != nil {
return types.EmptyInt, err
err = a.StateManager.WithParentStateTsk(tsk, func(state *state.StateTree) error {
if err := a.StateManager.WithActor(maddr, a.StateManager.WithActorState(ctx, &minerState))(state); err != nil {
return xerrors.Errorf("getting miner state: %w", err)
}
if err := as.Get(ctx, act.Head, &rewardState); err != nil {
return types.EmptyInt, err
if err := a.StateManager.WithActor(builtin.StoragePowerActorAddr, a.StateManager.WithActorState(ctx, &powerState))(state); err != nil {
return xerrors.Errorf("getting power state: %w", err)
}
if err := a.StateManager.WithActor(builtin.RewardActorAddr, a.StateManager.WithActorState(ctx, &rewardState))(state); err != nil {
return xerrors.Errorf("getting reward state: %w", err)
}
return nil
})
if err != nil {
return types.EmptyInt, err
}
var dealWeights market.VerifyDealsForActivationReturn
@ -805,7 +1000,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr
}
sectorWeight := miner.QAPowerForWeight(ssize, duration, dealWeights.DealWeight, dealWeights.VerifiedDealWeight)
initialPledge := miner.InitialPledgeForPower(sectorWeight, powerState.TotalQualityAdjPower, powerState.TotalPledgeCollateral, rewardState.ThisEpochReward, circSupply)
initialPledge := miner.InitialPledgeForPower(sectorWeight, powerState.TotalQualityAdjPower, reward.BaselinePowerAt(ts.Height()), powerState.TotalPledgeCollateral, rewardState.ThisEpochReward, circSupply)
return types.BigDiv(types.BigMul(initialPledge, initialPledgeNum), initialPledgeDen), nil
}
@ -816,24 +1011,23 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address
return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err)
}
act, err := a.StateManager.GetActor(maddr, ts)
if err != nil {
return types.EmptyInt, err
}
var act *types.Actor
var mas miner.State
if err := a.StateManager.WithParentState(ts, a.StateManager.WithActor(maddr, func(actor *types.Actor) error {
act = actor
return a.StateManager.WithActorState(ctx, &mas)(actor)
})); err != nil {
return types.BigInt{}, xerrors.Errorf("getting miner state: %w", err)
}
as := store.ActorStore(ctx, a.Chain.Blockstore())
var st miner.State
if err := as.Get(ctx, act.Head, &st); err != nil {
return types.EmptyInt, err
}
vested, err := st.CheckVestedFunds(as, ts.Height())
vested, err := mas.CheckVestedFunds(as, ts.Height())
if err != nil {
return types.EmptyInt, err
}
return types.BigAdd(st.GetAvailableBalance(act.Balance), vested), nil
return types.BigAdd(mas.GetAvailableBalance(act.Balance), vested), nil
}
// StateVerifiedClientStatus returns the data cap for the given address.

View File

@ -36,7 +36,11 @@ func (a *WalletAPI) WalletList(ctx context.Context) ([]address.Address, error) {
}
func (a *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (types.BigInt, error) {
return a.StateManager.GetBalance(addr, nil)
var bal types.BigInt
return bal, a.StateManager.WithParentStateTsk(types.EmptyTSK, a.StateManager.WithActor(addr, func(act *types.Actor) error {
bal = act.Balance
return nil
}))
}
func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {

View File

@ -63,6 +63,8 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address
for i, v := range vouchers {
sv, err := a.paychVoucherCreate(ctx, ch.Channel, paych.SignedVoucher{
ChannelAddr: ch.Channel,
Amount: v.Amount,
Lane: uint64(lane),
@ -161,7 +163,7 @@ func (a *PaychAPI) PaychVoucherAdd(ctx context.Context, ch address.Address, sv *
// actual additional value of this voucher will only be the difference between
// the two.
func (a *PaychAPI) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) {
return a.paychVoucherCreate(ctx, pch, paych.SignedVoucher{Amount: amt, Lane: lane})
return a.paychVoucherCreate(ctx, pch, paych.SignedVoucher{ChannelAddr: pch, Amount: amt, Lane: lane})
}
func (a *PaychAPI) paychVoucherCreate(ctx context.Context, pch address.Address, voucher paych.SignedVoucher) (*paych.SignedVoucher, error) {

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/filecoin-project/lotus/lib/bufbstore"
"golang.org/x/xerrors"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/libp2p/go-libp2p-core/host"
@ -36,7 +37,12 @@ import (
)
func ClientMultiDatastore(lc fx.Lifecycle, r repo.LockedRepo) (dtypes.ClientMultiDstore, error) {
mds, err := importmgr.NewMultiDstore(r, "/client")
ds, err := r.Datastore("/client")
if err != nil {
return nil, xerrors.Errorf("getting datastore out of reop: %w", err)
}
mds, err := importmgr.NewMultiDstore(ds)
if err != nil {
return nil, err
}

View File

@ -460,13 +460,7 @@ func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls stores.LocalStor
}
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error {
if err := sst.Close(); err != nil {
log.Errorf("%+v", err)
}
return nil
},
OnStop: sst.Close,
})
return sst, nil

View File

@ -226,11 +226,9 @@ type fsLockedRepo struct {
repoType RepoType
closer io.Closer
ds map[string]datastore.Batching
multiDs map[string]map[int64]datastore.Batching
dsErr error
dsOnce sync.Once
dsLk sync.Mutex
ds map[string]datastore.Batching
dsErr error
dsOnce sync.Once
storageLk sync.Mutex
configLk sync.Mutex

View File

@ -1,13 +1,11 @@
package repo
import (
"fmt"
"github.com/ipfs/go-datastore"
"golang.org/x/xerrors"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"github.com/ipfs/go-datastore"
"golang.org/x/xerrors"
dgbadger "github.com/dgraph-io/badger/v2"
badger "github.com/ipfs/go-ds-badger2"
@ -24,9 +22,7 @@ var fsDatastores = map[string]dsCtor{
// Those need to be fast for large writes... but also need a really good GC :c
"staging": badgerDs, // miner specific
}
var fsMultiDatastores = map[string]dsCtor{
"client": badgerDs, // client specific
}
@ -68,78 +64,11 @@ func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error)
return out, nil
}
func (fsr *fsLockedRepo) openMultiDatastores() (map[string]map[int64]datastore.Batching, error) {
out := map[string]map[int64]datastore.Batching{}
for p, ctor := range fsMultiDatastores {
path := fsr.join(filepath.Join(fsDatastore, p))
if err := os.MkdirAll(path, 0755); err != nil {
return nil, xerrors.Errorf("mkdir %s: %w", path, err)
}
di, err := ioutil.ReadDir(path)
if err != nil {
return nil, xerrors.Errorf("readdir '%s': %w", path, err)
}
out[p] = map[int64]datastore.Batching{}
for _, info := range di {
path := filepath.Join(path, info.Name())
prefix := datastore.NewKey(p)
id, err := strconv.ParseInt(info.Name(), 10, 64)
if err != nil {
log.Errorf("error parsing multi-datastore id for '%s': %w", path, err)
continue
}
// TODO: optimization: don't init datastores we don't need
ds, err := ctor(path)
if err != nil {
return nil, xerrors.Errorf("opening datastore %s: %w", prefix, err)
}
ds = measure.New("fsrepo."+p+"."+info.Name(), ds)
out[p][id] = ds
}
}
return out, nil
}
func (fsr *fsLockedRepo) openMultiDatastore(ns string, idx int64) (datastore.Batching, error) {
ctor, ok := fsMultiDatastores[ns]
if !ok {
return nil, xerrors.Errorf("no multi-datastore with namespace '%s'", ns)
}
si := fmt.Sprintf("%d", idx)
path := fsr.join(filepath.Join(fsDatastore, ns, si))
ds, err := ctor(path)
if err != nil {
return nil, xerrors.Errorf("opening datastore %s: %w", path, err)
}
ds = measure.New("fsrepo."+ns+"."+si, ds)
return ds, nil
}
func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) {
fsr.dsOnce.Do(func() {
var err error
fsr.ds, err = fsr.openDatastores()
if err != nil {
fsr.dsErr = err
return
}
fsr.multiDs, fsr.dsErr = fsr.openMultiDatastores()
fsr.ds, fsr.dsErr = fsr.openDatastores()
})
if fsr.dsErr != nil {
return nil, fsr.dsErr
}
@ -147,99 +76,5 @@ func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) {
if ok {
return ds, nil
}
k := datastore.NewKey(ns)
parts := k.List()
if len(parts) != 2 {
return nil, xerrors.Errorf("expected multi-datastore namespace to have 2 parts")
}
fsr.dsLk.Lock()
defer fsr.dsLk.Unlock()
mds, ok := fsr.multiDs[parts[0]]
if !ok {
return nil, xerrors.Errorf("no multi-datastore with namespace %s", ns)
}
idx, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, xerrors.Errorf("parsing mult-datastore index('%s'): %w", parts[1], err)
}
ds, ok = mds[idx]
if !ok {
ds, err = fsr.openMultiDatastore(parts[0], idx)
if err != nil {
return nil, xerrors.Errorf("opening multi-datastore: %w", err)
}
mds[idx] = ds
}
return ds, nil
}
func (fsr *fsLockedRepo) ListDatastores(ns string) ([]int64, error) {
k := datastore.NewKey(ns)
parts := k.List()
if len(parts) != 1 {
return nil, xerrors.Errorf("expected multi-datastore namespace to have 1 part")
}
fsr.dsLk.Lock()
defer fsr.dsLk.Unlock()
mds, ok := fsr.multiDs[parts[0]]
if !ok {
return nil, xerrors.Errorf("no multi-datastore with namespace %s", ns)
}
out := make([]int64, 0, len(mds))
for i := range mds {
out = append(out, i)
}
return out, nil
}
func (fsr *fsLockedRepo) DeleteDatastore(ns string) error {
k := datastore.NewKey(ns)
parts := k.List()
if len(parts) != 2 {
return xerrors.Errorf("expected multi-datastore namespace to have 2 parts")
}
mds, ok := fsr.multiDs[parts[0]]
if !ok {
return xerrors.Errorf("no multi-datastore with namespace %s", ns)
}
idx, err := strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return xerrors.Errorf("parsing mult-datastore index('%s'): %w", parts[1], err)
}
fsr.dsLk.Lock()
defer fsr.dsLk.Unlock()
ds, ok := mds[idx]
if !ok {
return xerrors.Errorf("no multi-datastore with at index (namespace %s)", ns)
}
delete(mds, idx)
if err := ds.Close(); err != nil {
return xerrors.Errorf("closing datastore: %w", err)
}
path := fsr.join(filepath.Join(fsDatastore, parts[0], parts[1]))
log.Warnw("removing sub-datastore", "path", path, "namespace", ns)
if err := os.RemoveAll(path); err != nil {
return xerrors.Errorf("remove '%s': %w", path, err)
}
return nil
return nil, xerrors.Errorf("no such datastore: %s", ns)
}
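
With the multi-datastore plumbing removed, the locked repo now only hands out the fixed namespaces opened by openDatastores; anything else fails with "no such datastore". A brief usage sketch (openClientDatastore is a hypothetical helper; per-import stores now live behind the importmgr multistore instead):

package sketch

import (
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/ipfs/go-datastore"
)

// openClientDatastore fetches the single shared client datastore; sub-namespaces
// such as "/client/1" are no longer served by the repo.
func openClientDatastore(lr repo.LockedRepo) (datastore.Batching, error) {
	return lr.Datastore("/client")
}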

View File

@ -42,7 +42,7 @@ type StoreMeta struct {
Labels map[string]string
}
func (m *Mgr) NewStore() (int64, *Store, error) {
func (m *Mgr) NewStore() (int, *Store, error) {
id := m.mds.Next()
st, err := m.mds.Get(id)
if err != nil {
@ -60,7 +60,7 @@ func (m *Mgr) NewStore() (int64, *Store, error) {
return id, st, err
}
func (m *Mgr) AddLabel(id int64, key, value string) error { // source, file path, data CID..
func (m *Mgr) AddLabel(id int, key, value string) error { // source, file path, data CID..
meta, err := m.ds.Get(datastore.NewKey(fmt.Sprintf("%d", id)))
if err != nil {
return xerrors.Errorf("getting metadata form datastore: %w", err)
@ -81,11 +81,11 @@ func (m *Mgr) AddLabel(id int64, key, value string) error { // source, file path
return m.ds.Put(datastore.NewKey(fmt.Sprintf("%d", id)), meta)
}
func (m *Mgr) List() []int64 {
func (m *Mgr) List() []int {
return m.mds.List()
}
func (m *Mgr) Info(id int64) (*StoreMeta, error) {
func (m *Mgr) Info(id int) (*StoreMeta, error) {
meta, err := m.ds.Get(datastore.NewKey(fmt.Sprintf("%d", id)))
if err != nil {
return nil, xerrors.Errorf("getting metadata form datastore: %w", err)
@ -99,7 +99,7 @@ func (m *Mgr) Info(id int64) (*StoreMeta, error) {
return &sm, nil
}
func (m *Mgr) Remove(id int64) error {
func (m *Mgr) Remove(id int) error {
if err := m.mds.Delete(id); err != nil {
return xerrors.Errorf("removing import: %w", err)
}
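
As a usage note, import IDs are plain ints throughout the manager now. A hedged sketch of the typical flow (trackImportSketch is hypothetical, and the import path for the importmgr package is assumed here):

package sketch

import (
	"github.com/filecoin-project/lotus/node/repo/importmgr" // path assumed
)

// trackImportSketch creates a fresh per-import store and labels it, using the
// int-based IDs introduced above.
func trackImportSketch(mgr *importmgr.Mgr) (int, error) {
	id, _, err := mgr.NewStore()
	if err != nil {
		return 0, err
	}
	// labels are free-form metadata, e.g. the original file path or data CID
	if err := mgr.AddLabel(id, "source", "example import"); err != nil {
		return 0, err
	}
	return id, nil
}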

View File

@ -1,44 +1,47 @@
package importmgr
import (
"encoding/json"
"fmt"
"path"
"sort"
"sync"
"sync/atomic"
"github.com/hashicorp/go-multierror"
"go.uber.org/multierr"
"golang.org/x/xerrors"
"github.com/ipfs/go-datastore"
ktds "github.com/ipfs/go-datastore/keytransform"
"github.com/ipfs/go-datastore/query"
)
type dsProvider interface {
Datastore(namespace string) (datastore.Batching, error)
ListDatastores(namespace string) ([]int64, error)
DeleteDatastore(namespace string) error
}
type MultiStore struct {
provider dsProvider
namespace string
ds datastore.Batching
open map[int64]*Store
next int64
open map[int]*Store
next int
lk sync.RWMutex
}
func NewMultiDstore(provider dsProvider, namespace string) (*MultiStore, error) {
ids, err := provider.ListDatastores(namespace)
if err != nil {
return nil, xerrors.Errorf("listing datastores: %w", err)
var dsListKey = datastore.NewKey("/list")
var dsMultiKey = datastore.NewKey("/multi")
func NewMultiDstore(ds datastore.Batching) (*MultiStore, error) {
listBytes, err := ds.Get(dsListKey)
if xerrors.Is(err, datastore.ErrNotFound) {
listBytes, _ = json.Marshal([]int{})
} else if err != nil {
return nil, xerrors.Errorf("could not read multistore list: %w", err)
}
var ids []int
if err := json.Unmarshal(listBytes, &ids); err != nil {
return nil, xerrors.Errorf("could not unmarshal multistore list: %w", err)
}
mds := &MultiStore{
provider: provider,
namespace: namespace,
open: map[int64]*Store{},
ds: ds,
open: map[int]*Store{},
}
for _, i := range ids {
@ -55,15 +58,33 @@ func NewMultiDstore(provider dsProvider, namespace string) (*MultiStore, error)
return mds, nil
}
func (mds *MultiStore) path(i int64) string {
return path.Join("/", mds.namespace, fmt.Sprintf("%d", i))
func (mds *MultiStore) Next() int {
mds.lk.Lock()
defer mds.lk.Unlock()
mds.next++
return mds.next
}
func (mds *MultiStore) Next() int64 {
return atomic.AddInt64(&mds.next, 1)
func (mds *MultiStore) updateStores() error {
stores := make([]int, 0, len(mds.open))
for k := range mds.open {
stores = append(stores, k)
}
sort.Ints(stores)
listBytes, err := json.Marshal(stores)
if err != nil {
return xerrors.Errorf("could not marshal list: %w", err)
}
err = mds.ds.Put(dsListKey, listBytes)
if err != nil {
return xerrors.Errorf("could not save stores list: %w", err)
}
return nil
}
func (mds *MultiStore) Get(i int64) (*Store, error) {
func (mds *MultiStore) Get(i int) (*Store, error) {
mds.lk.Lock()
defer mds.lk.Unlock()
@ -72,40 +93,85 @@ func (mds *MultiStore) Get(i int64) (*Store, error) {
return store, nil
}
ds, err := mds.provider.Datastore(mds.path(i))
wds := ktds.Wrap(mds.ds, ktds.PrefixTransform{
Prefix: dsMultiKey.ChildString(fmt.Sprintf("%d", i)),
})
var err error
mds.open[i], err = openStore(wds)
if err != nil {
return nil, err
return nil, xerrors.Errorf("could not open new store: %w", err)
}
mds.open[i], err = openStore(ds)
return mds.open[i], err
err = mds.updateStores()
if err != nil {
return nil, xerrors.Errorf("updating stores: %w", err)
}
return mds.open[i], nil
}
func (mds *MultiStore) List() []int64 {
func (mds *MultiStore) List() []int {
mds.lk.RLock()
defer mds.lk.RUnlock()
out := make([]int64, 0, len(mds.open))
out := make([]int, 0, len(mds.open))
for i := range mds.open {
out = append(out, i)
}
sort.Ints(out)
return out
}
func (mds *MultiStore) Delete(i int64) error {
func (mds *MultiStore) Delete(i int) error {
mds.lk.Lock()
defer mds.lk.Unlock()
store, ok := mds.open[i]
if ok {
if err := store.Close(); err != nil {
return xerrors.Errorf("closing sub-datastore %d: %w", i, err)
}
delete(mds.open, i)
if !ok {
return nil
}
delete(mds.open, i)
err := store.Close()
if err != nil {
return xerrors.Errorf("closing store: %w", err)
}
return mds.provider.DeleteDatastore(mds.path(i))
err = mds.updateStores()
if err != nil {
return xerrors.Errorf("updating stores: %w", err)
}
qres, err := store.ds.Query(query.Query{KeysOnly: true})
if err != nil {
return xerrors.Errorf("query error: %w", err)
}
defer qres.Close() //nolint:errcheck
b, err := store.ds.Batch()
if err != nil {
return xerrors.Errorf("batch error: %w", err)
}
for r := range qres.Next() {
if r.Error != nil {
_ = b.Commit()
return xerrors.Errorf("iterator error: %w", err)
}
err := b.Delete(datastore.NewKey(r.Key))
if err != nil {
_ = b.Commit()
return xerrors.Errorf("adding to batch: %w", err)
}
}
err = b.Commit()
if err != nil {
return xerrors.Errorf("committing: %w", err)
}
return nil
}
func (mds *MultiStore) Close() error {
@ -113,12 +179,10 @@ func (mds *MultiStore) Close() error {
defer mds.lk.Unlock()
var err error
for i, store := range mds.open {
cerr := store.Close()
if cerr != nil {
err = multierror.Append(err, xerrors.Errorf("closing sub-datastore %d: %w", i, cerr))
}
for _, s := range mds.open {
err = multierr.Append(err, s.Close())
}
mds.open = make(map[int]*Store)
return err
}
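
For reference, a compact sketch of the key layout the rewritten MultiStore relies on: the set of live store IDs is persisted as JSON under the /list key, and each sub-store's keys are transparently prefixed with /multi/<id> via a keytransform wrapper, so all sub-stores share one backing datastore. Both helper names below are hypothetical and only mirror what Get and updateStores do above.

package sketch

import (
	"encoding/json"
	"fmt"

	"github.com/ipfs/go-datastore"
	ktds "github.com/ipfs/go-datastore/keytransform"
)

// openSubStore returns a view of ds in which every key is prefixed with
// /multi/<id>, matching the wrapping done in MultiStore.Get above.
func openSubStore(ds datastore.Batching, id int) datastore.Batching {
	return ktds.Wrap(ds, ktds.PrefixTransform{
		Prefix: datastore.NewKey("/multi").ChildString(fmt.Sprintf("%d", id)),
	})
}

// saveStoreList persists the set of live store IDs as JSON under /list,
// which is what updateStores above does after every open and delete.
func saveStoreList(ds datastore.Batching, ids []int) error {
	b, err := json.Marshal(ids)
	if err != nil {
		return err
	}
	return ds.Put(datastore.NewKey("/list"), b)
}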

View File

@ -36,8 +36,6 @@ type LockedRepo interface {
// Returns datastore defined in this repo.
Datastore(namespace string) (datastore.Batching, error)
ListDatastores(namespace string) ([]int64, error)
DeleteDatastore(namespace string) error
// Returns config in this repo
Config() (interface{}, error)

View File

@ -67,6 +67,8 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error {
}
func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) error {
*t = VoucherInfo{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
@ -265,6 +267,8 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error {
}
func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) error {
*t = ChannelInfo{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)

View File

@ -102,6 +102,10 @@ func (pm *Manager) CheckVoucherValid(ctx context.Context, ch address.Address, sv
}
func (pm *Manager) checkVoucherValid(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]*paych.LaneState, error) {
if sv.ChannelAddr != ch {
return nil, xerrors.Errorf("voucher ChannelAddr doesn't match channel address, got %s, expected %s", sv.ChannelAddr, ch)
}
act, pchState, err := pm.loadPaychState(ctx, ch)
if err != nil {
return nil, err

View File

@ -325,7 +325,7 @@ func TestCheckVoucherValid(t *testing.T) {
err := mgr.TrackInboundChannel(ctx, ch)
require.NoError(t, err)
sv := testCreateVoucher(t, tcase.voucherLane, tcase.voucherNonce, tcase.voucherAmount, tcase.key)
sv := testCreateVoucher(t, ch, tcase.voucherLane, tcase.voucherNonce, tcase.voucherAmount, tcase.key)
err = mgr.CheckVoucherValid(ctx, ch, sv)
if tcase.expectError {
@ -405,7 +405,7 @@ func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
voucherLane := uint64(1)
voucherNonce := uint64(2)
voucherAmount := big.NewInt(6)
sv := testCreateVoucher(t, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
sv := testCreateVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.Error(t, err)
@ -423,7 +423,7 @@ func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
// actor balance is 10 so total is ok.
//
voucherAmount = big.NewInt(4)
sv = testCreateVoucher(t, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.NoError(t, err)
@ -447,7 +447,7 @@ func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
//
voucherNonce++
voucherAmount = big.NewInt(6)
sv = testCreateVoucher(t, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.Error(t, err)
@ -465,7 +465,7 @@ func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
// actor balance is 10 so total is ok.
//
voucherAmount = big.NewInt(5)
sv = testCreateVoucher(t, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.NoError(t, err)
}
@ -482,14 +482,14 @@ func TestAddVoucherDelta(t *testing.T) {
minDelta := big.NewInt(2)
nonce := uint64(1)
voucherAmount := big.NewInt(1)
sv := testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv := testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
_, err := mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.Error(t, err)
// Expect success when adding a voucher whose amount is equal to minDelta
nonce++
voucherAmount = big.NewInt(2)
sv = testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
delta, err := mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
require.EqualValues(t, delta.Int64(), 2)
@ -497,7 +497,7 @@ func TestAddVoucherDelta(t *testing.T) {
// Check that delta is correct when there's an existing voucher
nonce++
voucherAmount = big.NewInt(5)
sv = testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
delta, err = mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
require.EqualValues(t, delta.Int64(), 3)
@ -506,7 +506,7 @@ func TestAddVoucherDelta(t *testing.T) {
nonce = uint64(1)
voucherAmount = big.NewInt(6)
voucherLane = uint64(2)
sv = testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
delta, err = mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
require.EqualValues(t, delta.Int64(), 6)
@ -524,7 +524,7 @@ func TestAddVoucherNextLane(t *testing.T) {
// Add a voucher in lane 2
nonce := uint64(1)
voucherLane := uint64(2)
sv := testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv := testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
_, err := mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
@ -534,7 +534,7 @@ func TestAddVoucherNextLane(t *testing.T) {
// Add a voucher in lane 1
voucherLane = uint64(1)
sv = testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
_, err = mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
@ -544,7 +544,7 @@ func TestAddVoucherNextLane(t *testing.T) {
// Add a voucher in lane 5
voucherLane = uint64(5)
sv = testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv = testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
_, err = mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
@ -567,7 +567,7 @@ func TestAddVoucherProof(t *testing.T) {
// Add a voucher with no proof
var proof []byte
sv := testCreateVoucher(t, voucherLane, nonce, voucherAmount, fromKeyPrivate)
sv := testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate)
_, err := mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
@ -639,14 +639,14 @@ func TestNextNonceForLane(t *testing.T) {
voucherLane := uint64(1)
for _, nonce := range []uint64{2, 4} {
voucherAmount = big.Add(voucherAmount, big.NewInt(1))
sv := testCreateVoucher(t, voucherLane, nonce, voucherAmount, key)
sv := testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, key)
_, err := mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
}
voucherLane = uint64(2)
nonce := uint64(7)
sv := testCreateVoucher(t, voucherLane, nonce, voucherAmount, key)
sv := testCreateVoucher(t, ch, voucherLane, nonce, voucherAmount, key)
_, err = mgr.AddVoucher(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
@ -704,11 +704,12 @@ func testGenerateKeyPair(t *testing.T) ([]byte, []byte) {
return priv, pub
}
func testCreateVoucher(t *testing.T, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych.SignedVoucher {
func testCreateVoucher(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paych.SignedVoucher {
sv := &paych.SignedVoucher{
Lane: voucherLane,
Nonce: nonce,
Amount: voucherAmount,
ChannelAddr: ch,
Lane: voucherLane,
Nonce: nonce,
Amount: voucherAmount,
}
signingBytes, err := sv.SigningBytes()

View File

@ -72,7 +72,7 @@ func (s SealingAPIAdapter) StateMinerWorkerAddress(ctx context.Context, maddr ad
return mi.Worker, nil
}
func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (*miner.Deadlines, error) {
func (s SealingAPIAdapter) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.Deadline, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
@ -184,6 +184,26 @@ func (s SealingAPIAdapter) StateSectorGetInfo(ctx context.Context, maddr address
return s.delegate.StateSectorGetInfo(ctx, maddr, sectorNumber, tsk)
}
func (s SealingAPIAdapter) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok sealing.TipSetToken) (*sealing.SectorLocation, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return nil, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err)
}
l, err := s.delegate.StateSectorPartition(ctx, maddr, sectorNumber, tsk)
if err != nil {
return nil, err
}
if l != nil {
return &sealing.SectorLocation{
Deadline: l.Deadline,
Partition: l.Partition,
}, nil
}
return nil, nil // not found
}
func (s SealingAPIAdapter) StateMarketStorageDeal(ctx context.Context, dealID abi.DealID, tok sealing.TipSetToken) (market.DealProposal, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {

View File

@ -48,10 +48,12 @@ type Miner struct {
type storageMinerApi interface {
// Call a read only method on actors (no interaction with the chain required)
StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error)
StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) (*miner.Deadlines, error)
StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) ([]*miner.Deadline, error)
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error)
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
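
The interface gains per-partition queries; a small sketch of how a caller enumerates the partitions of one proving deadline (partitionLister and countPartitions are illustrative names; only the method signature is taken from the interface above):

package sketch

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)

// partitionLister captures just the new method added to storageMinerApi above.
type partitionLister interface {
	StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
}

// countPartitions walks the partitions a miner must prove in deadline dlIdx,
// using the chain head (types.EmptyTSK) as the lookup tipset.
func countPartitions(ctx context.Context, api partitionLister, maddr address.Address, dlIdx uint64) (int, error) {
	parts, err := api.StateMinerPartitions(ctx, maddr, dlIdx, types.EmptyTSK)
	if err != nil {
		return 0, err
	}
	return len(parts), nil
}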

View File

@ -45,7 +45,7 @@ func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis
preseal.CommD = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
d, _ := commcid.CIDToPieceCommitmentV1(preseal.CommD)
r := mock.CommDR(d)
preseal.CommR = commcid.ReplicaCommitmentV1ToCID(r[:])
preseal.CommR, _ = commcid.ReplicaCommitmentV1ToCID(r[:])
preseal.SectorID = abi.SectorNumber(i + 1)
preseal.Deal = market.DealProposal{
PieceCID: preseal.CommD,

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"sync"
"time"
"github.com/filecoin-project/go-bitfield"
@ -107,63 +108,61 @@ func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check *abi.BitFi
return &sbf, nil
}
func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, deadline uint64, deadlineSectors *abi.BitField, ts *types.TipSet) error {
faults, err := s.api.StateMinerFaults(ctx, s.actor, ts.Key())
if err != nil {
return xerrors.Errorf("getting on-chain faults: %w", err)
}
fc, err := faults.Count()
if err != nil {
return xerrors.Errorf("counting faulty sectors: %w", err)
}
if fc == 0 {
return nil
}
recov, err := s.api.StateMinerRecoveries(ctx, s.actor, ts.Key())
if err != nil {
return xerrors.Errorf("getting on-chain recoveries: %w", err)
}
unrecovered, err := bitfield.SubtractBitField(faults, recov)
if err != nil {
return xerrors.Errorf("subtracting recovered set from fault set: %w", err)
}
unrecovered, err = bitfield.IntersectBitField(unrecovered, deadlineSectors)
if err != nil {
return xerrors.Errorf("intersect unrecovered set with deadlineSectors: %w", err)
}
uc, err := unrecovered.Count()
if err != nil {
return xerrors.Errorf("counting unrecovered sectors: %w", err)
}
if uc == 0 {
return nil
}
sbf, err := s.checkSectors(ctx, unrecovered)
if err != nil {
return xerrors.Errorf("checking unrecovered sectors: %w", err)
}
// if all sectors failed to recover, don't declare recoveries
sbfCount, err := sbf.Count()
if err != nil {
return xerrors.Errorf("counting recovered sectors: %w", err)
}
if sbfCount == 0 {
log.Warnw("No recoveries to declare", "deadline", deadline, "faulty", uc)
return nil
}
func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
ctx, span := trace.StartSpan(ctx, "storage.checkNextRecoveries")
defer span.End()
params := &miner.DeclareFaultsRecoveredParams{
Recoveries: []miner.RecoveryDeclaration{{Deadline: deadline, Sectors: sbf}},
Recoveries: []miner.RecoveryDeclaration{},
}
faulty := uint64(0)
for partIdx, partition := range partitions {
unrecovered, err := bitfield.SubtractBitField(partition.Faults, partition.Recoveries)
if err != nil {
return xerrors.Errorf("subtracting recovered set from fault set: %w", err)
}
uc, err := unrecovered.Count()
if err != nil {
return xerrors.Errorf("counting unrecovered sectors: %w", err)
}
if uc == 0 {
continue
}
faulty += uc
recovered, err := s.checkSectors(ctx, unrecovered)
if err != nil {
return xerrors.Errorf("checking unrecovered sectors: %w", err)
}
// if all sectors failed to recover, don't declare recoveries
recoveredCount, err := recovered.Count()
if err != nil {
return xerrors.Errorf("counting recovered sectors: %w", err)
}
if recoveredCount == 0 {
continue
}
params.Recoveries = append(params.Recoveries, miner.RecoveryDeclaration{
Deadline: dlIdx,
Partition: uint64(partIdx),
Sectors: recovered,
})
}
if len(params.Recoveries) == 0 {
if faulty != 0 {
log.Warnw("No recoveries to declare", "deadline", dlIdx, "faulty", faulty)
}
return nil
}
enc, aerr := actors.SerializeParams(params)
@ -200,51 +199,56 @@ func (s *WindowPoStScheduler) checkNextRecoveries(ctx context.Context, deadline
return nil
}
func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, deadline uint64, deadlineSectors *abi.BitField, ts *types.TipSet) error {
dc, err := deadlineSectors.Count()
if err != nil {
return xerrors.Errorf("counting deadline sectors: %w", err)
}
if dc == 0 {
// nothing can become faulty
return nil
}
toCheck, err := s.getSectorsToProve(ctx, deadlineSectors, true, ts)
if err != nil {
return xerrors.Errorf("getting next sectors to prove: %w", err)
}
good, err := s.checkSectors(ctx, deadlineSectors)
if err != nil {
return xerrors.Errorf("checking sectors: %w", err)
}
faulty, err := bitfield.SubtractBitField(toCheck, good)
if err != nil {
return xerrors.Errorf("calculating faulty sector set: %w", err)
}
c, err := faulty.Count()
if err != nil {
return xerrors.Errorf("counting faulty sectors: %w", err)
}
if c == 0 {
return nil
}
log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", c)
func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64, partitions []*miner.Partition) error {
ctx, span := trace.StartSpan(ctx, "storage.checkNextFaults")
defer span.End()
params := &miner.DeclareFaultsParams{
Faults: []miner.FaultDeclaration{
{
Deadline: deadline,
Sectors: faulty,
},
},
Faults: []miner.FaultDeclaration{},
}
bad := uint64(0)
for partIdx, partition := range partitions {
toCheck, err := partition.ActiveSectors()
if err != nil {
return xerrors.Errorf("getting active sectors: %w", err)
}
good, err := s.checkSectors(ctx, toCheck)
if err != nil {
return xerrors.Errorf("checking sectors: %w", err)
}
faulty, err := bitfield.SubtractBitField(toCheck, good)
if err != nil {
return xerrors.Errorf("calculating faulty sector set: %w", err)
}
c, err := faulty.Count()
if err != nil {
return xerrors.Errorf("counting faulty sectors: %w", err)
}
if c == 0 {
continue
}
bad += c
params.Faults = append(params.Faults, miner.FaultDeclaration{
Deadline: dlIdx,
Partition: uint64(partIdx),
Sectors: faulty,
})
}
if len(params.Faults) == 0 {
return nil
}
log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)
enc, aerr := actors.SerializeParams(params)
if aerr != nil {
return xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
@ -279,76 +283,37 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, deadline uint
return nil
}
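
To summarize the shape change in both checks above: fault and recovery declarations are now issued per (deadline, partition) pair rather than as one bitfield per deadline. A hedged sketch of the fault case (declareFaultsSketch is a hypothetical helper; the struct fields come from the code above):

package sketch

import (
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)

// declareFaultsSketch shows the per-partition shape of the new declaration
// parameters; dlIdx, partIdx and faulty would come from the partition loop above.
func declareFaultsSketch(dlIdx uint64, partIdx int, faulty *abi.BitField) *miner.DeclareFaultsParams {
	return &miner.DeclareFaultsParams{
		Faults: []miner.FaultDeclaration{{
			Deadline:  dlIdx,           // proving deadline being declared for
			Partition: uint64(partIdx), // index of the partition within that deadline
			Sectors:   faulty,          // sectors in that partition found faulty
		}},
	}
}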
// the input sectors must match with the miner actor
func (s *WindowPoStScheduler) getSectorsToProve(ctx context.Context, deadlineSectors *abi.BitField, ignoreRecoveries bool, ts *types.TipSet) (*abi.BitField, error) {
stateFaults, err := s.api.StateMinerFaults(ctx, s.actor, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting on-chain faults: %w", err)
}
faults, err := bitfield.IntersectBitField(deadlineSectors, stateFaults)
if err != nil {
return nil, xerrors.Errorf("failed to intersect proof sectors with faults: %w", err)
}
recoveries, err := s.api.StateMinerRecoveries(ctx, s.actor, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting on-chain recoveries: %w", err)
}
if !ignoreRecoveries {
expectedRecoveries, err := bitfield.IntersectBitField(faults, recoveries)
if err != nil {
return nil, xerrors.Errorf("failed to intersect recoveries with faults: %w", err)
}
faults, err = bitfield.SubtractBitField(faults, expectedRecoveries)
if err != nil {
return nil, xerrors.Errorf("failed to subtract recoveries from faults: %w", err)
}
}
nonFaults, err := bitfield.SubtractBitField(deadlineSectors, faults)
if err != nil {
return nil, xerrors.Errorf("failed to diff bitfields: %w", err)
}
empty, err := nonFaults.IsEmpty()
if err != nil {
return nil, xerrors.Errorf("failed to check if bitfield was empty: %w", err)
}
if empty {
return nil, xerrors.Errorf("no non-faulty sectors in partitions: %w", err)
}
return nonFaults, nil
}
func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo, ts *types.TipSet) (*miner.SubmitWindowedPoStParams, error) {
ctx, span := trace.StartSpan(ctx, "storage.runPost")
defer span.End()
deadlines, err := s.api.StateMinerDeadlines(ctx, s.actor, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting miner deadlines: %w", err)
}
var declWait sync.WaitGroup
defer declWait.Wait()
declWait.Add(1)
go func() {
defer declWait.Done()
{
// check faults / recoveries for the *next* deadline. It's already too
// late to declare them for this deadline
declDeadline := (di.Index + 1) % miner.WPoStPeriodDeadlines
if err := s.checkNextRecoveries(ctx, declDeadline, deadlines.Due[declDeadline], ts); err != nil {
partitions, err := s.api.StateMinerPartitions(ctx, s.actor, declDeadline, ts.Key())
if err != nil {
log.Errorf("getting partitions: %v", err)
return
}
if err := s.checkNextRecoveries(ctx, declDeadline, partitions); err != nil {
// TODO: This is potentially quite bad, but not even trying to post when this fails is objectively worse
log.Errorf("checking sector recoveries: %v", err)
}
if err := s.checkNextFaults(ctx, declDeadline, deadlines.Due[declDeadline], ts); err != nil {
if err := s.checkNextFaults(ctx, declDeadline, partitions); err != nil {
// TODO: This is also potentially really bad, but we try to post anyways
log.Errorf("checking sector faults: %v", err)
}
}
}()
buf := new(bytes.Buffer)
if err := s.actor.MarshalCBOR(buf); err != nil {
@ -359,88 +324,92 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
}
firstPartition, _, err := miner.PartitionsForDeadline(deadlines, s.partitionSectors, di.Index)
partitions, err := s.api.StateMinerPartitions(ctx, s.actor, di.Index, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting partitions for deadline: %w", err)
return nil, xerrors.Errorf("getting partitions: %w", err)
}
partitionCount, _, err := miner.DeadlineCount(deadlines, s.partitionSectors, di.Index)
if err != nil {
return nil, xerrors.Errorf("getting deadline partition count: %w", err)
params := &miner.SubmitWindowedPoStParams{
Deadline: di.Index,
Partitions: make([]miner.PoStPartition, len(partitions)),
Proofs: nil,
}
dc, err := deadlines.Due[di.Index].Count()
if err != nil {
return nil, xerrors.Errorf("get deadline count: %w", err)
var sinfos []abi.SectorInfo
sidToPart := map[abi.SectorNumber]uint64{}
skipCount := uint64(0)
for partIdx, partition := range partitions {
// TODO: Can do this in parallel
toProve, err := partition.ActiveSectors()
if err != nil {
return nil, xerrors.Errorf("getting active sectors: %w", err)
}
toProve, err = bitfield.MergeBitFields(toProve, partition.Recoveries)
if err != nil {
return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
}
good, err := s.checkSectors(ctx, toProve)
if err != nil {
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
}
skipped, err := bitfield.SubtractBitField(toProve, good)
if err != nil {
return nil, xerrors.Errorf("toProve - good: %w", err)
}
sc, err := skipped.Count()
if err != nil {
return nil, xerrors.Errorf("getting skipped sector count: %w", err)
}
skipCount += sc
ssi, err := s.sectorInfo(ctx, good, ts)
if err != nil {
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
}
sinfos = append(sinfos, ssi...)
for _, si := range ssi {
sidToPart[si.SectorNumber] = uint64(partIdx)
}
if len(ssi) == 0 {
log.Warn("attempted to run windowPost without any sectors...")
return nil, xerrors.Errorf("no sectors to run windowPost on")
}
params.Partitions[partIdx] = miner.PoStPartition{
Index: uint64(partIdx),
Skipped: skipped,
}
}
log.Infof("di: %+v", di)
log.Infof("dc: %+v", dc)
log.Infof("fp: %+v", firstPartition)
log.Infof("pc: %+v", partitionCount)
log.Infof("ts: %+v (%d)", ts.Key(), ts.Height())
if partitionCount == 0 {
if len(sinfos) == 0 {
// nothing to prove..
return nil, errNoPartitions
}
partitions := make([]uint64, partitionCount)
for i := range partitions {
partitions[i] = firstPartition + uint64(i)
}
nps, err := s.getSectorsToProve(ctx, deadlines.Due[di.Index], false, ts)
if err != nil {
return nil, xerrors.Errorf("get need prove sectors: %w", err)
}
good, err := s.checkSectors(ctx, nps)
if err != nil {
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
}
skipped, err := bitfield.SubtractBitField(nps, good)
if err != nil {
return nil, xerrors.Errorf("nps - good: %w", err)
}
skipCount, err := skipped.Count()
if err != nil {
return nil, xerrors.Errorf("getting skipped sector count: %w", err)
}
ssi, err := s.sortedSectorInfo(ctx, good, ts)
if err != nil {
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
}
if len(ssi) == 0 {
log.Warn("attempted to run windowPost without any sectors...")
return nil, xerrors.Errorf("no sectors to run windowPost on")
}
log.Infow("running windowPost",
"chain-random", rand,
"deadline", di,
"height", ts.Height(),
"skipped", skipCount)
var snums []abi.SectorNumber
for _, si := range ssi {
snums = append(snums, si.SectorNumber)
}
tsStart := build.Clock.Now()
log.Infow("generating windowPost",
"sectors", len(ssi))
log.Infow("generating windowPost", "sectors", len(sinfos))
mid, err := address.IDFromAddress(s.actor)
if err != nil {
return nil, err
}
postOut, postSkipped, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), ssi, abi.PoStRandomness(rand))
postOut, postSkipped, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
if err != nil {
return nil, xerrors.Errorf("running post failed: %w", err)
}
@ -450,21 +419,16 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di miner.DeadlineInfo
}
for _, sector := range postSkipped {
skipped.Set(uint64(sector.Number))
params.Partitions[sidToPart[sector.Number]].Skipped.Set(uint64(sector.Number))
}
elapsed := time.Since(tsStart)
log.Infow("submitting window PoSt", "elapsed", elapsed)
return &miner.SubmitWindowedPoStParams{
Deadline: di.Index,
Partitions: partitions,
Proofs: postOut,
Skipped: *skipped,
}, nil
return params, nil
}
func (s *WindowPoStScheduler) sortedSectorInfo(ctx context.Context, deadlineSectors *abi.BitField, ts *types.TipSet) ([]abi.SectorInfo, error) {
func (s *WindowPoStScheduler) sectorInfo(ctx context.Context, deadlineSectors *abi.BitField, ts *types.TipSet) ([]abi.SectorInfo, error) {
sset, err := s.api.StateMinerSectors(ctx, s.actor, deadlineSectors, false, ts.Key())
if err != nil {
return nil, err