diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 49e461d00..6d717b44d 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -13,3 +13,4 @@
### Conformance testing.
conformance/ @raulk
extern/test-vectors @raulk
+cmd/tvx @raulk
\ No newline at end of file
diff --git a/README.md b/README.md
index 766317e4f..fa432bf7d 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-
+
@@ -18,7 +18,7 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
## Building & Documentation
-For instructions on how to build lotus from source, please visit [Lotus build and setup instruction](https://docs.filecoin.io/get-started/lotus/installation/#minimal-requirements) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
+For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/).
## Reporting a Vulnerability
diff --git a/api/api_full.go b/api/api_full.go
index 6d2d0c7b5..459a5634f 100644
--- a/api/api_full.go
+++ b/api/api_full.go
@@ -176,6 +176,9 @@ type FullNode interface {
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
+ // SyncValidateTipset indicates whether the provided tipset is valid or not
+ SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error)
+
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
@@ -244,6 +247,8 @@ type FullNode interface {
WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error
+ // WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+ WalletValidateAddress(context.Context, string) (address.Address, error)
// Other
diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go
index 3abc5a18b..38b20b842 100644
--- a/api/apistruct/struct.go
+++ b/api/apistruct/struct.go
@@ -112,6 +112,7 @@ type FullNodeStruct struct {
SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
+ SyncValidateTipset func(ctx context.Context, tsk types.TipSetKey) (bool, error) `perm:"read"`
MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"`
MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"`
@@ -129,18 +130,19 @@ type FullNodeStruct struct {
MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
- WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"`
- WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
- WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
- WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
- WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
- WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
- WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"`
- WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
- WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
- WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
- WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
- WalletDelete func(context.Context, address.Address) error `perm:"write"`
+ WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"`
+ WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
+ WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
+ WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
+ WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
+ WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
+ WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"`
+ WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
+ WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
+ WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
+ WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
+ WalletDelete func(context.Context, address.Address) error `perm:"write"`
+ WalletValidateAddress func(context.Context, string) (address.Address, error) `perm:"read"`
ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
@@ -642,6 +644,10 @@ func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address)
return c.Internal.WalletDelete(ctx, addr)
}
+func (c *FullNodeStruct) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) {
+ return c.Internal.WalletValidateAddress(ctx, str)
+}
+
func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
return c.Internal.MpoolGetNonce(ctx, addr)
}
@@ -746,6 +752,10 @@ func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string
return c.Internal.SyncCheckBad(ctx, bcid)
}
+func (c *FullNodeStruct) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) {
+ return c.Internal.SyncValidateTipset(ctx, tsk)
+}
+
func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
return c.Internal.StateNetworkName(ctx)
}
diff --git a/api/test/window_post.go b/api/test/window_post.go
index 683489a91..958c91816 100644
--- a/api/test/window_post.go
+++ b/api/test/window_post.go
@@ -153,18 +153,16 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
fmt.Printf("Running one proving period\n")
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
-
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -186,7 +184,6 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
- require.NoError(t, err)
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
@@ -210,7 +207,6 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
- require.NoError(t, err)
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
@@ -236,18 +232,17 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -267,17 +262,17 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
+ fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)
+
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
- if head.Height()%100 == 0 {
- fmt.Printf("@%d\n", head.Height())
- }
build.Clock.Sleep(blocktime)
}
@@ -300,12 +295,14 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
require.NoError(t, err)
waitUntil := head.Height() + 10
+ fmt.Printf("End for head.Height > %d\n", waitUntil)
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > waitUntil {
+ fmt.Printf("Now head.Height = %d\n", head.Height())
break
}
}
diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go
index 1a4c466b9..5ea5cfc81 100644
--- a/chain/actors/builtin/miner/miner.go
+++ b/chain/actors/builtin/miner/miner.go
@@ -20,6 +20,10 @@ import (
// Unchanged between v0 and v1 actors
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
+var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
+var WPoStChallengeWindow = miner0.WPoStChallengeWindow
+var WPoStChallengeLookback = miner0.WPoStChallengeLookback
+var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff
const MinSectorExpiration = miner0.MinSectorExpiration
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 96900925f..d54ea7164 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -75,8 +75,6 @@ var (
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
ErrNonceGap = errors.New("unfulfilled nonce gap")
-
- ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
)
const (
@@ -795,98 +793,6 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
return act.Balance, nil
}
-func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) {
- // serialize push access to reduce lock contention
- mp.addSema <- struct{}{}
- defer func() {
- <-mp.addSema
- }()
-
- mp.curTsLk.Lock()
- mp.lk.Lock()
-
- curTs := mp.curTs
-
- fromKey := addr
- if fromKey.Protocol() == address.ID {
- var err error
- fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs)
- if err != nil {
- mp.lk.Unlock()
- mp.curTsLk.Unlock()
- return nil, xerrors.Errorf("resolving sender key: %w", err)
- }
- }
-
- nonce, err := mp.getNonceLocked(fromKey, mp.curTs)
- if err != nil {
- mp.lk.Unlock()
- mp.curTsLk.Unlock()
- return nil, xerrors.Errorf("get nonce locked failed: %w", err)
- }
-
- // release the locks for signing
- mp.lk.Unlock()
- mp.curTsLk.Unlock()
-
- msg, err := cb(fromKey, nonce)
- if err != nil {
- return nil, err
- }
-
- err = mp.checkMessage(msg)
- if err != nil {
- return nil, err
- }
-
- msgb, err := msg.Serialize()
- if err != nil {
- return nil, err
- }
-
- // reacquire the locks and check state for consistency
- mp.curTsLk.Lock()
- defer mp.curTsLk.Unlock()
-
- if mp.curTs != curTs {
- return nil, ErrTryAgain
- }
-
- mp.lk.Lock()
- defer mp.lk.Unlock()
-
- nonce2, err := mp.getNonceLocked(fromKey, mp.curTs)
- if err != nil {
- return nil, xerrors.Errorf("get nonce locked failed: %w", err)
- }
-
- if nonce2 != nonce {
- return nil, ErrTryAgain
- }
-
- publish, err := mp.verifyMsgBeforeAdd(msg, curTs, true)
- if err != nil {
- return nil, err
- }
-
- if err := mp.checkBalance(msg, curTs); err != nil {
- return nil, err
- }
-
- if err := mp.addLocked(msg, false); err != nil {
- return nil, xerrors.Errorf("add locked failed: %w", err)
- }
- if err := mp.addLocal(msg, msgb); err != nil {
- log.Errorf("addLocal failed: %+v", err)
- }
-
- if publish {
- err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
- }
-
- return msg, err
-}
-
func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) {
mp.lk.Lock()
defer mp.lk.Unlock()
diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go
new file mode 100644
index 000000000..1ad83543b
--- /dev/null
+++ b/chain/messagesigner/messagesigner.go
@@ -0,0 +1,124 @@
+package messagesigner
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/chain/messagepool"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/namespace"
+ logging "github.com/ipfs/go-log/v2"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+)
+
+const dsKeyActorNonce = "ActorNonce"
+
+var log = logging.Logger("messagesigner")
+
+type mpoolAPI interface {
+ GetNonce(address.Address) (uint64, error)
+}
+
+// MessageSigner keeps track of nonces per address, and increments the nonce
+// when signing a message
+type MessageSigner struct {
+ wallet *wallet.Wallet
+ mpool mpoolAPI
+ ds datastore.Batching
+}
+
+func NewMessageSigner(wallet *wallet.Wallet, mpool *messagepool.MessagePool, ds dtypes.MetadataDS) *MessageSigner {
+ return newMessageSigner(wallet, mpool, ds)
+}
+
+func newMessageSigner(wallet *wallet.Wallet, mpool mpoolAPI, ds dtypes.MetadataDS) *MessageSigner {
+ ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
+ return &MessageSigner{
+ wallet: wallet,
+ mpool: mpool,
+ ds: ds,
+ }
+}
+
+// SignMessage increments the nonce for the message From address, and signs
+// the message
+func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) {
+ nonce, err := ms.nextNonce(msg.From)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create nonce: %w", err)
+ }
+
+ msg.Nonce = nonce
+ sig, err := ms.wallet.Sign(ctx, msg.From, msg.Cid().Bytes())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to sign message: %w", err)
+ }
+
+ return &types.SignedMessage{
+ Message: *msg,
+ Signature: *sig,
+ }, nil
+}
+
+// nextNonce returns the next nonce to use for the given address, incrementing the stored value.
+// If no nonce has been stored in the datastore yet, it falls back to the nonce from the message pool.
+func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) {
+ // Nonces used to be created by the mempool and we need to support nodes
+ // that have mempool nonces, so first check the mempool for a nonce for
+ // this address. Note that the mempool returns the actor state's nonce
+ // by default.
+ nonce, err := ms.mpool.GetNonce(addr)
+ if err != nil {
+ return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err)
+ }
+
+ // Get the nonce for this address from the datastore
+ addrNonceKey := datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()})
+ dsNonceBytes, err := ms.ds.Get(addrNonceKey)
+
+ switch {
+ case xerrors.Is(err, datastore.ErrNotFound):
+ // If a nonce for this address hasn't yet been created in the
+ // datastore, just use the nonce from the mempool
+
+ case err != nil:
+ return 0, xerrors.Errorf("failed to get nonce from datastore: %w", err)
+
+ default:
+ // There is a nonce in the datastore, so unmarshal and increment it
+ maj, val, err := cbg.CborReadHeader(bytes.NewReader(dsNonceBytes))
+ if err != nil {
+ return 0, xerrors.Errorf("failed to parse nonce from datastore: %w", err)
+ }
+ if maj != cbg.MajUnsignedInt {
+ return 0, xerrors.Errorf("bad cbor type parsing nonce from datastore")
+ }
+
+ dsNonce := val + 1
+
+ // The message pool nonce should be less than or equal to the datastore nonce
+ if nonce <= dsNonce {
+ nonce = dsNonce
+ } else {
+ log.Warnf("mempool nonce was larger than datastore nonce (%d > %d)", nonce, dsNonce)
+ }
+ }
+
+ // Write the nonce for this address to the datastore
+ buf := bytes.Buffer{}
+ _, err = buf.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, nonce))
+ if err != nil {
+ return 0, xerrors.Errorf("failed to marshall nonce: %w", err)
+ }
+ err = ms.ds.Put(addrNonceKey, buf.Bytes())
+ if err != nil {
+ return 0, xerrors.Errorf("failed to write nonce to datastore: %w", err)
+ }
+
+ return nonce, nil
+}
diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go
new file mode 100644
index 000000000..55676b258
--- /dev/null
+++ b/chain/messagesigner/messagesigner_test.go
@@ -0,0 +1,158 @@
+package messagesigner
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "github.com/filecoin-project/lotus/chain/wallet"
+
+ "github.com/filecoin-project/go-state-types/crypto"
+ "github.com/stretchr/testify/require"
+
+ ds_sync "github.com/ipfs/go-datastore/sync"
+
+ "github.com/filecoin-project/go-address"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/ipfs/go-datastore"
+)
+
+type mockMpool struct {
+ lk sync.RWMutex
+ nonces map[address.Address]uint64
+}
+
+func newMockMpool() *mockMpool {
+ return &mockMpool{nonces: make(map[address.Address]uint64)}
+}
+
+func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) {
+ mp.lk.Lock()
+ defer mp.lk.Unlock()
+
+ mp.nonces[addr] = nonce
+}
+
+func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) {
+ mp.lk.RLock()
+ defer mp.lk.RUnlock()
+
+ return mp.nonces[addr], nil
+}
+
+func TestMessageSignerSignMessage(t *testing.T) {
+ ctx := context.Background()
+
+ w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
+ from1, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ from2, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ to1, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+ to2, err := w.GenerateKey(crypto.SigTypeSecp256k1)
+ require.NoError(t, err)
+
+ type msgSpec struct {
+ msg *types.Message
+ mpoolNonce [1]uint64
+ expNonce uint64
+ }
+ tests := []struct {
+ name string
+ msgs []msgSpec
+ }{{
+ // No nonce yet in datastore
+ name: "no nonce yet",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 0,
+ }},
+ }, {
+ // Get nonce value of zero from mpool
+ name: "mpool nonce zero",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ mpoolNonce: [1]uint64{0},
+ expNonce: 0,
+ }},
+ }, {
+ // Get non-zero nonce value from mpool
+ name: "mpool nonce set",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ mpoolNonce: [1]uint64{5},
+ expNonce: 5,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ // Should adjust datastore nonce because mpool nonce is higher
+ mpoolNonce: [1]uint64{10},
+ expNonce: 10,
+ }},
+ }, {
+ // Nonce should increment independently for each address
+ name: "nonce increments per address",
+ msgs: []msgSpec{{
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 0,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 1,
+ }, {
+ msg: &types.Message{
+ To: to2,
+ From: from2,
+ },
+ mpoolNonce: [1]uint64{5},
+ expNonce: 5,
+ }, {
+ msg: &types.Message{
+ To: to2,
+ From: from2,
+ },
+ expNonce: 6,
+ }, {
+ msg: &types.Message{
+ To: to1,
+ From: from1,
+ },
+ expNonce: 2,
+ }},
+ }}
+ for _, tt := range tests {
+ tt := tt
+ t.Run(tt.name, func(t *testing.T) {
+ mpool := newMockMpool()
+ ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
+ ms := newMessageSigner(w, mpool, ds)
+
+ for _, m := range tt.msgs {
+ if len(m.mpoolNonce) == 1 {
+ mpool.setNonce(m.msg.From, m.mpoolNonce[0])
+ }
+ smsg, err := ms.SignMessage(ctx, m.msg)
+ require.NoError(t, err)
+ require.Equal(t, m.expNonce, smsg.Message.Nonce)
+ }
+ })
+ }
+}
diff --git a/chain/store/store.go b/chain/store/store.go
index 1dbf69547..0806fb921 100644
--- a/chain/store/store.go
+++ b/chain/store/store.go
@@ -10,6 +10,8 @@ import (
"strconv"
"sync"
+ "golang.org/x/sync/errgroup"
+
"github.com/filecoin-project/go-state-types/crypto"
"github.com/minio/blake2b-simd"
@@ -284,6 +286,16 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e
return nil
}
+func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
+ key := blockValidationCacheKeyPrefix.Instance(blkid.String())
+
+ if err := cs.ds.Delete(key); err != nil {
+ return xerrors.Errorf("removing from valid block cache: %w", err)
+ }
+
+ return nil
+}
+
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
@@ -467,14 +479,25 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
return v.(*types.TipSet), nil
}
- var blks []*types.BlockHeader
- for _, c := range tsk.Cids() {
- b, err := cs.GetBlock(c)
- if err != nil {
- return nil, xerrors.Errorf("get block %s: %w", c, err)
- }
+ // Fetch tipset block headers from blockstore in parallel
+ var eg errgroup.Group
+ cids := tsk.Cids()
+ blks := make([]*types.BlockHeader, len(cids))
+ for i, c := range cids {
+ i, c := i, c
+ eg.Go(func() error {
+ b, err := cs.GetBlock(c)
+ if err != nil {
+ return xerrors.Errorf("get block %s: %w", c, err)
+ }
- blks = append(blks, b)
+ blks[i] = b
+ return nil
+ })
+ }
+ err := eg.Wait()
+ if err != nil {
+ return nil, err
}
ts, err := types.NewTipSet(blks)
diff --git a/chain/sync.go b/chain/sync.go
index 9e098a57e..b2e3bb7f1 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -597,7 +597,7 @@ func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
}
-func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) error {
+func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
ctx, span := trace.StartSpan(ctx, "validateTipSet")
defer span.End()
@@ -613,7 +613,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
- if err := syncer.ValidateBlock(ctx, b); err != nil {
+ if err := syncer.ValidateBlock(ctx, b, useCache); err != nil {
if isPermanent(err) {
syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error()))
}
@@ -680,7 +680,7 @@ func blockSanityChecks(h *types.BlockHeader) error {
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
-func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
+func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
@@ -689,13 +689,15 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
}
}()
- isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
- if err != nil {
- return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
- }
+ if useCache {
+ isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
+ if err != nil {
+ return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
+ }
- if isValidated {
- return nil
+ if isValidated {
+ return nil
+ }
}
validationStart := build.Clock.Now()
@@ -782,31 +784,35 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
b.Header.ParentWeight, pweight)
}
- // Stuff that needs stateroot / worker address
- stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
- if err != nil {
- return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
- }
-
- if stateroot != h.ParentStateRoot {
- msgs, err := syncer.store.MessagesForTipset(baseTs)
+ stateRootCheck := async.Err(func() error {
+ stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
- log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
- } else {
- log.Warn("Messages for tipset with mismatching state:")
- for i, m := range msgs {
- mm := m.VMMessage()
- log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
- }
+ return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
- return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
- }
+ if stateroot != h.ParentStateRoot {
+ msgs, err := syncer.store.MessagesForTipset(baseTs)
+ if err != nil {
+ log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
+ } else {
+ log.Warn("Messages for tipset with mismatching state:")
+ for i, m := range msgs {
+ mm := m.VMMessage()
+ log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
+ }
+ }
- if precp != h.ParentMessageReceipts {
- return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
- }
+ return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
+ }
+ if precp != h.ParentMessageReceipts {
+ return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
+ }
+
+ return nil
+ })
+
+ // Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
@@ -927,6 +933,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
winnerCheck,
msgsCheck,
baseFeeCheck,
+ stateRootCheck,
}
var merr error
@@ -954,8 +961,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
return mulErr
}
- if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
- return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
+ if useCache {
+ if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
+ return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
+ }
}
return nil
@@ -1457,7 +1466,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*
return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error {
log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids()))
- if err := syncer.ValidateTipSet(ctx, fts); err != nil {
+ if err := syncer.ValidateTipSet(ctx, fts, true); err != nil {
log.Errorf("failed to validate tipset: %+v", err)
return xerrors.Errorf("message processing failed: %w", err)
}
diff --git a/chain/sync_test.go b/chain/sync_test.go
index 7a839be2b..1b06f604b 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -732,7 +732,7 @@ func TestSyncInputs(t *testing.T) {
err := s.ValidateBlock(context.TODO(), &types.FullBlock{
Header: &types.BlockHeader{},
- })
+ }, false)
if err == nil {
t.Fatal("should error on empty block")
}
@@ -741,7 +741,7 @@ func TestSyncInputs(t *testing.T) {
h.ElectionProof = nil
- err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h})
+ err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}, false)
if err == nil {
t.Fatal("should error on block with nil election proof")
}
diff --git a/chain/types/tipset.go b/chain/types/tipset.go
index 44d41c29d..07eff3734 100644
--- a/chain/types/tipset.go
+++ b/chain/types/tipset.go
@@ -167,12 +167,16 @@ func (ts *TipSet) Equals(ots *TipSet) bool {
return false
}
- if len(ts.blks) != len(ots.blks) {
+ if ts.height != ots.height {
return false
}
- for i, b := range ts.blks {
- if b.Cid() != ots.blks[i].Cid() {
+ if len(ts.cids) != len(ots.cids) {
+ return false
+ }
+
+ for i, cid := range ts.cids {
+ if cid != ots.cids[i] {
return false
}
}
diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go
index 156d57282..72dd413ed 100644
--- a/chain/vm/runtime.go
+++ b/chain/vm/runtime.go
@@ -5,7 +5,6 @@ import (
"context"
"encoding/binary"
"fmt"
- gruntime "runtime"
"time"
"github.com/filecoin-project/go-address"
@@ -460,8 +459,10 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
}
func (rt *Runtime) finilizeGasTracing() {
- if rt.lastGasCharge != nil {
- rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
+ if enableTracing {
+ if rt.lastGasCharge != nil {
+ rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
+ }
}
}
@@ -490,34 +491,39 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) {
}
+var enableTracing = false
+
func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError {
toUse := gas.Total()
- var callers [10]uintptr
- cout := gruntime.Callers(2+skip, callers[:])
+ if enableTracing {
+ var callers [10]uintptr
- now := build.Clock.Now()
- if rt.lastGasCharge != nil {
- rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
+ cout := 0 //gruntime.Callers(2+skip, callers[:])
+
+ now := build.Clock.Now()
+ if rt.lastGasCharge != nil {
+ rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
+ }
+
+ gasTrace := types.GasTrace{
+ Name: gas.Name,
+ Extra: gas.Extra,
+
+ TotalGas: toUse,
+ ComputeGas: gas.ComputeGas,
+ StorageGas: gas.StorageGas,
+
+ TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
+ VirtualComputeGas: gas.VirtualCompute,
+ VirtualStorageGas: gas.VirtualStorage,
+
+ Callers: callers[:cout],
+ }
+ rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
+ rt.lastGasChargeTime = now
+ rt.lastGasCharge = &gasTrace
}
- gasTrace := types.GasTrace{
- Name: gas.Name,
- Extra: gas.Extra,
-
- TotalGas: toUse,
- ComputeGas: gas.ComputeGas,
- StorageGas: gas.StorageGas,
-
- TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
- VirtualComputeGas: gas.VirtualCompute,
- VirtualStorageGas: gas.VirtualStorage,
-
- Callers: callers[:cout],
- }
- rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
- rt.lastGasChargeTime = now
- rt.lastGasCharge = &gasTrace
-
// overflow safe
if rt.gasUsed > rt.gasAvailable-toUse {
rt.gasUsed = rt.gasAvailable
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index 54ea47698..44979454f 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -227,14 +227,21 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
}
rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac)
- rt.lastGasChargeTime = start
+ if enableTracing {
+ rt.lastGasChargeTime = start
+ if parent != nil {
+ rt.lastGasChargeTime = parent.lastGasChargeTime
+ rt.lastGasCharge = parent.lastGasCharge
+ defer func() {
+ parent.lastGasChargeTime = rt.lastGasChargeTime
+ parent.lastGasCharge = rt.lastGasCharge
+ }()
+ }
+ }
+
if parent != nil {
- rt.lastGasChargeTime = parent.lastGasChargeTime
- rt.lastGasCharge = parent.lastGasCharge
defer func() {
parent.gasUsed = rt.gasUsed
- parent.lastGasChargeTime = rt.lastGasChargeTime
- parent.lastGasCharge = rt.lastGasCharge
}()
}
if gasCharge != nil {
diff --git a/cli/wallet.go b/cli/wallet.go
index 27993a1ba..aa5b9bed3 100644
--- a/cli/wallet.go
+++ b/cli/wallet.go
@@ -9,13 +9,16 @@ import (
"os"
"strings"
- "github.com/filecoin-project/go-address"
- "github.com/filecoin-project/go-state-types/crypto"
- types "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/urfave/cli/v2"
"golang.org/x/xerrors"
- "github.com/urfave/cli/v2"
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ types "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/wallet"
+ "github.com/filecoin-project/lotus/lib/tablewriter"
)
var walletCmd = &cli.Command{
@@ -66,6 +69,13 @@ var walletNew = &cli.Command{
var walletList = &cli.Command{
Name: "list",
Usage: "List wallet address",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "addr-only",
+ Usage: "Only print addresses",
+ Aliases: []string{"a"},
+ },
+ },
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -79,9 +89,52 @@ var walletList = &cli.Command{
return err
}
+ // Assume an error means no default key is set
+ def, _ := api.WalletDefaultAddress(ctx)
+
+ tw := tablewriter.New(
+ tablewriter.Col("Address"),
+ tablewriter.Col("Balance"),
+ tablewriter.Col("Nonce"),
+ tablewriter.Col("Default"),
+ tablewriter.NewLineCol("Error"))
+
for _, addr := range addrs {
- fmt.Println(addr.String())
+ if cctx.Bool("addr-only") {
+ fmt.Println(addr.String())
+ } else {
+ a, err := api.StateGetActor(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ if !strings.Contains(err.Error(), "actor not found") {
+ tw.Write(map[string]interface{}{
+ "Address": addr,
+ "Error": err,
+ })
+ continue
+ }
+
+ a = &types.Actor{
+ Balance: big.Zero(),
+ }
+ }
+
+ row := map[string]interface{}{
+ "Address": addr,
+ "Balance": types.FIL(a.Balance),
+ "Nonce": a.Nonce,
+ }
+ if addr == def {
+ row["Default"] = "X"
+ }
+
+ tw.Write(row)
+ }
}
+
+ if !cctx.Bool("addr-only") {
+ return tw.Flush(os.Stdout)
+ }
+
return nil
},
}
diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go
new file mode 100644
index 000000000..51ab696f7
--- /dev/null
+++ b/cmd/lotus-bench/caching_verifier.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "errors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ "github.com/filecoin-project/specs-actors/actors/runtime/proof"
+ "github.com/ipfs/go-datastore"
+ "github.com/minio/blake2b-simd"
+ cbg "github.com/whyrusleeping/cbor-gen"
+)
+
+type cachingVerifier struct {
+ ds datastore.Datastore
+ backend ffiwrapper.Verifier
+}
+
+const bufsize = 128
+
+func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBORMarshaler) (bool, error) {
+ hasher := blake2b.New256()
+ wr := bufio.NewWriterSize(hasher, bufsize)
+ err := param.MarshalCBOR(wr)
+ if err != nil {
+ log.Errorf("could not marshal call info: %+v", err)
+ return execute()
+ }
+ err = wr.Flush()
+ if err != nil {
+ log.Errorf("could not flush: %+v", err)
+ return execute()
+ }
+ hash := hasher.Sum(nil)
+ key := datastore.NewKey(string(hash))
+ fromDs, err := cv.ds.Get(key)
+ if err == nil {
+ switch fromDs[0] {
+ case 's':
+ return true, nil
+ case 'f':
+ return false, nil
+ case 'e':
+ return false, errors.New(string(fromDs[1:]))
+ default:
+ log.Errorf("bad cached result in cache %s(%x)", fromDs[0], fromDs[0])
+ return execute()
+ }
+ } else if errors.Is(err, datastore.ErrNotFound) {
+ // recalc
+ ok, err := execute()
+ var save []byte
+ if err != nil {
+ if ok {
+ log.Errorf("success with an error: %+v", err)
+ } else {
+ save = append([]byte{'e'}, []byte(err.Error())...)
+ }
+ } else if ok {
+ save = []byte{'s'}
+ } else {
+ save = []byte{'f'}
+ }
+
+ if len(save) != 0 {
+ errSave := cv.ds.Put(key, save)
+ if errSave != nil {
+ log.Errorf("error saving result: %+v", errSave)
+ }
+ }
+
+ return ok, err
+ } else {
+ log.Errorf("could not get data from cache: %+v", err)
+ return execute()
+ }
+}
+
+func (cv *cachingVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) {
+ return cv.withCache(func() (bool, error) {
+ return cv.backend.VerifySeal(svi)
+ }, &svi)
+}
+func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) {
+ return cv.backend.VerifyWinningPoSt(ctx, info)
+}
+func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) {
+ return cv.withCache(func() (bool, error) {
+ return cv.backend.VerifyWindowPoSt(ctx, info)
+ }, &info)
+}
+func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, a abi.ActorID, rnd abi.PoStRandomness, u uint64) ([]uint64, error) {
+ return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u)
+}
+
+var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)
diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go
index f2845ba20..3d93b0e5e 100644
--- a/cmd/lotus-bench/import.go
+++ b/cmd/lotus-bench/import.go
@@ -16,6 +16,8 @@ import (
"sort"
"time"
+ "github.com/cockroachdb/pebble"
+ "github.com/cockroachdb/pebble/bloom"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -24,12 +26,16 @@ import (
"github.com/filecoin-project/lotus/lib/blockstore"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+ "github.com/ipld/go-car"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
+ bdg "github.com/dgraph-io/badger/v2"
"github.com/ipfs/go-datastore"
badger "github.com/ipfs/go-ds-badger2"
+ pebbleds "github.com/ipfs/go-ds-pebble"
+
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
@@ -60,6 +66,29 @@ var importBenchCmd = &cli.Command{
Name: "repodir",
Usage: "set the repo directory for the lotus bench run (defaults to /tmp)",
},
+ &cli.StringFlag{
+ Name: "syscall-cache",
+ Usage: "read and write syscall results from datastore",
+ },
+ &cli.BoolFlag{
+ Name: "export-traces",
+ Usage: "should we export execution traces",
+ Value: true,
+ },
+ &cli.BoolFlag{
+ Name: "no-import",
+ Usage: "should we import the chain? if set to true chain has to be previously imported",
+ },
+ &cli.BoolFlag{
+ Name: "global-profile",
+ Value: true,
+ },
+ &cli.Int64Flag{
+ Name: "start-at",
+ },
+ &cli.BoolFlag{
+ Name: "only-import",
+ },
},
Action: func(cctx *cli.Context) error {
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
@@ -74,6 +103,10 @@ var importBenchCmd = &cli.Command{
}
defer cfi.Close() //nolint:errcheck // read only file
+ go func() {
+ http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
+ }()
+
var tdir string
if rdir := cctx.String("repodir"); rdir != "" {
tdir = rdir
@@ -85,33 +118,105 @@ var importBenchCmd = &cli.Command{
tdir = tmp
}
- bds, err := badger.NewDatastore(tdir, nil)
+ bdgOpt := badger.DefaultOptions
+ bdgOpt.GcInterval = 0
+ bdgOpt.Options = bdg.DefaultOptions("")
+ bdgOpt.Options.SyncWrites = false
+ bdgOpt.Options.Truncate = true
+ bdgOpt.Options.DetectConflicts = false
+
+ var bds datastore.Batching
+ if false {
+ cache := 512
+ bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{
+ // Pebble has a single combined cache area and the write
+ // buffers are taken from this too. Assign all available
+ // memory allowance for cache.
+ Cache: pebble.NewCache(int64(cache * 1024 * 1024)),
+			// The size of the memory table (as well as the write buffer).
+			// Note: there may be more than two memory tables in the system.
+			// MemTableStopWritesThreshold can be configured to avoid memory abuse.
+ MemTableSize: cache * 1024 * 1024 / 4,
+			// The default compaction concurrency is 1 thread;
+			// here we use all available CPUs for faster compaction.
+ MaxConcurrentCompactions: runtime.NumCPU(),
+ // Per-level options. Options for at least one level must be specified. The
+ // options for the last level are used for all subsequent levels.
+ Levels: []pebble.LevelOptions{
+ {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression},
+ },
+ Logger: log,
+ })
+ } else {
+ bds, err = badger.NewDatastore(tdir, &bdgOpt)
+ }
if err != nil {
return err
}
+ defer bds.Close() //nolint:errcheck
+
bs := blockstore.NewBlockstore(bds)
- cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts())
+ cacheOpts := blockstore.DefaultCacheOpts()
+ cacheOpts.HasBloomFilterSize = 0
+
+ cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts)
if err != nil {
return err
}
bs = cbs
ds := datastore.NewMapDatastore()
- cs := store.NewChainStore(bs, ds, vm.Syscalls(ffiwrapper.ProofVerifier))
+
+ var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier
+ if cctx.IsSet("syscall-cache") {
+ scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt)
+ if err != nil {
+ return xerrors.Errorf("opening syscall-cache datastore: %w", err)
+ }
+ defer scds.Close() //nolint:errcheck
+
+ verifier = &cachingVerifier{
+ ds: scds,
+ backend: verifier,
+ }
+ }
+ if cctx.Bool("only-gc") {
+ return nil
+ }
+
+ cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier))
stm := stmgr.NewStateManager(cs)
- prof, err := os.Create("import-bench.prof")
- if err != nil {
- return err
- }
- defer prof.Close() //nolint:errcheck
+ if cctx.Bool("global-profile") {
+ prof, err := os.Create("import-bench.prof")
+ if err != nil {
+ return err
+ }
+ defer prof.Close() //nolint:errcheck
- if err := pprof.StartCPUProfile(prof); err != nil {
- return err
+ if err := pprof.StartCPUProfile(prof); err != nil {
+ return err
+ }
}
- head, err := cs.Import(cfi)
- if err != nil {
- return err
+ var head *types.TipSet
+ if !cctx.Bool("no-import") {
+ head, err = cs.Import(cfi)
+ if err != nil {
+ return err
+ }
+ } else {
+ cr, err := car.NewCarReader(cfi)
+ if err != nil {
+ return err
+ }
+ head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...))
+ if err != nil {
+ return err
+ }
+ }
+
+ if cctx.Bool("only-import") {
+ return nil
}
gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true)
@@ -124,6 +229,20 @@ var importBenchCmd = &cli.Command{
return err
}
+ startEpoch := abi.ChainEpoch(1)
+ if cctx.IsSet("start-at") {
+ startEpoch = abi.ChainEpoch(cctx.Int64("start-at"))
+ start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true)
+ if err != nil {
+ return err
+ }
+
+ err = cs.SetHead(start)
+ if err != nil {
+ return err
+ }
+ }
+
if h := cctx.Int64("height"); h != 0 {
tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true)
if err != nil {
@@ -134,7 +253,7 @@ var importBenchCmd = &cli.Command{
ts := head
tschain := []*types.TipSet{ts}
- for ts.Height() != 0 {
+ for ts.Height() > startEpoch {
next, err := cs.LoadTipSet(ts.Parents())
if err != nil {
return err
@@ -144,45 +263,48 @@ var importBenchCmd = &cli.Command{
ts = next
}
- ibj, err := os.Create("import-bench.json")
- if err != nil {
- return err
+ var enc *json.Encoder
+ if cctx.Bool("export-traces") {
+ ibj, err := os.Create("import-bench.json")
+ if err != nil {
+ return err
+ }
+ defer ibj.Close() //nolint:errcheck
+
+ enc = json.NewEncoder(ibj)
}
- defer ibj.Close() //nolint:errcheck
- enc := json.NewEncoder(ibj)
-
- var lastTse *TipSetExec
-
- lastState := tschain[len(tschain)-1].ParentState()
- for i := len(tschain) - 2; i >= 0; i-- {
+ for i := len(tschain) - 1; i >= 1; i-- {
cur := tschain[i]
+ start := time.Now()
log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids())
- if cur.ParentState() != lastState {
- lastTrace := lastTse.Trace
+ st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
+ if err != nil {
+ return err
+ }
+ tse := &TipSetExec{
+ TipSet: cur.Key(),
+ Trace: trace,
+ Duration: time.Since(start),
+ }
+ if enc != nil {
+ stripCallers(tse.Trace)
+
+ if err := enc.Encode(tse); err != nil {
+ return xerrors.Errorf("failed to write out tipsetexec: %w", err)
+ }
+ }
+ if tschain[i-1].ParentState() != st {
+ stripCallers(tse.Trace)
+ lastTrace := tse.Trace
d, err := json.MarshalIndent(lastTrace, "", " ")
if err != nil {
panic(err)
}
fmt.Println("TRACE")
fmt.Println(string(d))
- return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), lastState)
- }
- start := time.Now()
- st, trace, err := stm.ExecutionTrace(context.TODO(), cur)
- if err != nil {
- return err
- }
- stripCallers(trace)
-
- lastTse = &TipSetExec{
- TipSet: cur.Key(),
- Trace: trace,
- Duration: time.Since(start),
- }
- lastState = st
- if err := enc.Encode(lastTse); err != nil {
- return xerrors.Errorf("failed to write out tipsetexec: %w", err)
+ //fmt.Println(statediff.Diff(context.Background(), bs, tschain[i-1].ParentState(), st, statediff.ExpandActors))
+ return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), st)
}
}
diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go
index 0a88eacb4..bf7ce1e52 100644
--- a/cmd/lotus-seal-worker/main.go
+++ b/cmd/lotus-seal-worker/main.go
@@ -111,6 +111,11 @@ var runCmd = &cli.Command{
Name: "no-local-storage",
Usage: "don't use storageminer repo for sector storage",
},
+ &cli.BoolFlag{
+ Name: "no-swap",
+ Usage: "don't use swap",
+ Value: false,
+ },
&cli.BoolFlag{
Name: "addpiece",
Usage: "enable addpiece",
@@ -355,6 +360,7 @@ var runCmd = &cli.Command{
LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
SealProof: spt,
TaskTypes: taskTypes,
+ NoSwap: cctx.Bool("no-swap"),
}, remote, localStore, nodeApi, nodeApi, wsts),
localStore: localStore,
ls: lr,
diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go
index d39f51bd1..8ded6bf4a 100644
--- a/cmd/lotus-shed/dealtracker.go
+++ b/cmd/lotus-shed/dealtracker.go
@@ -5,10 +5,10 @@ import (
"encoding/json"
"net"
"net/http"
- "os"
- "strings"
+ "sync"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/ipfs/go-cid"
@@ -19,58 +19,54 @@ type dealStatsServer struct {
api api.FullNode
}
-var filteredClients map[address.Address]bool
+// Requested by @jbenet
+// How many epochs back to look at for dealstats
+var epochLookback = abi.ChainEpoch(10)
+
+// these lists grow continuously with the network
+// TODO: need to switch this to an LRU of sorts, to ensure refreshes
+var knownFiltered = new(sync.Map)
+var resolvedWallets = new(sync.Map)
func init() {
- fc := []string{"t0112", "t0113", "t0114", "t010089"}
-
- filtered, set := os.LookupEnv("FILTERED_CLIENTS")
- if set {
- fc = strings.Split(filtered, ":")
- }
-
- filteredClients = make(map[address.Address]bool)
- for _, a := range fc {
- addr, err := address.NewFromString(a)
+ for _, a := range []string{
+ "t0100", // client for genesis miner
+ "t0101", // client for genesis miner
+ "t0102", // client for genesis miner
+ "t0112", // client for genesis miner
+ "t0113", // client for genesis miner
+ "t0114", // client for genesis miner
+ "t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet
+ "t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet
+ "t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet
+ "t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet
+ } {
+ a, err := address.NewFromString(a)
if err != nil {
panic(err)
}
- filteredClients[addr] = true
+ knownFiltered.Store(a, true)
}
}
type dealCountResp struct {
- Total int64 `json:"total"`
- Epoch int64 `json:"epoch"`
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) {
- ctx := context.Background()
- head, err := dss.api.ChainHead(ctx)
- if err != nil {
- log.Warnf("failed to get chain head: %s", err)
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
w.WriteHeader(500)
return
}
- deals, err := dss.api.StateMarketDeals(ctx, head.Key())
- if err != nil {
- log.Warnf("failed to get market deals: %s", err)
- w.WriteHeader(500)
- return
- }
-
- var count int64
- for _, d := range deals {
- if !filteredClients[d.Proposal.Client] {
- count++
- }
- }
-
if err := json.NewEncoder(w).Encode(&dealCountResp{
- Total: count,
- Epoch: int64(head.Height()),
+ Endpoint: "COUNT_DEALS",
+ Payload: int64(len(deals)),
+ Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal count response: %s", err)
return
@@ -78,39 +74,28 @@ func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *htt
}
type dealAverageResp struct {
- AverageSize int64 `json:"average_size"`
- Epoch int64 `json:"epoch"`
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) {
- ctx := context.Background()
- head, err := dss.api.ChainHead(ctx)
- if err != nil {
- log.Warnf("failed to get chain head: %s", err)
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
w.WriteHeader(500)
return
}
- deals, err := dss.api.StateMarketDeals(ctx, head.Key())
- if err != nil {
- log.Warnf("failed to get market deals: %s", err)
- w.WriteHeader(500)
- return
- }
-
- var count int64
var totalBytes int64
for _, d := range deals {
- if !filteredClients[d.Proposal.Client] {
- count++
- totalBytes += int64(d.Proposal.PieceSize.Unpadded())
- }
+ totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
}
if err := json.NewEncoder(w).Encode(&dealAverageResp{
- AverageSize: totalBytes / count,
- Epoch: int64(head.Height()),
+ Endpoint: "AVERAGE_DEAL_SIZE",
+ Payload: totalBytes / int64(len(deals)),
+ Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal average response: %s", err)
return
@@ -118,37 +103,27 @@ func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter,
}
type dealTotalResp struct {
- TotalBytes int64 `json:"total_size"`
- Epoch int64 `json:"epoch"`
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload int64 `json:"payload"`
}
func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) {
- ctx := context.Background()
-
- head, err := dss.api.ChainHead(ctx)
- if err != nil {
- log.Warnf("failed to get chain head: %s", err)
- w.WriteHeader(500)
- return
- }
-
- deals, err := dss.api.StateMarketDeals(ctx, head.Key())
- if err != nil {
- log.Warnf("failed to get market deals: %s", err)
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
w.WriteHeader(500)
return
}
var totalBytes int64
for _, d := range deals {
- if !filteredClients[d.Proposal.Client] {
- totalBytes += int64(d.Proposal.PieceSize.Unpadded())
- }
+ totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded())
}
if err := json.NewEncoder(w).Encode(&dealTotalResp{
- TotalBytes: totalBytes,
- Epoch: int64(head.Height()),
+ Endpoint: "DEAL_BYTES",
+ Payload: totalBytes,
+ Epoch: epoch,
}); err != nil {
log.Warnf("failed to write back deal average response: %s", err)
return
@@ -157,6 +132,12 @@ func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r
}
type clientStatsOutput struct {
+ Epoch int64 `json:"epoch"`
+ Endpoint string `json:"endpoint"`
+ Payload []*clientStats `json:"payload"`
+}
+
+type clientStats struct {
Client address.Address `json:"client"`
DataSize int64 `json:"data_size"`
NumCids int `json:"num_cids"`
@@ -168,51 +149,41 @@ type clientStatsOutput struct {
}
func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) {
- ctx := context.Background()
-
- head, err := dss.api.ChainHead(ctx)
- if err != nil {
- log.Warnf("failed to get chain head: %s", err)
+ epoch, deals := dss.filteredDealList()
+ if epoch == 0 {
w.WriteHeader(500)
return
}
- deals, err := dss.api.StateMarketDeals(ctx, head.Key())
- if err != nil {
- log.Warnf("failed to get market deals: %s", err)
- w.WriteHeader(500)
- return
- }
-
- stats := make(map[address.Address]*clientStatsOutput)
+ stats := make(map[address.Address]*clientStats)
for _, d := range deals {
- if filteredClients[d.Proposal.Client] {
- continue
- }
- st, ok := stats[d.Proposal.Client]
+ st, ok := stats[d.deal.Proposal.Client]
if !ok {
- st = &clientStatsOutput{
- Client: d.Proposal.Client,
+ st = &clientStats{
+ Client: d.resolvedWallet,
cids: make(map[cid.Cid]bool),
providers: make(map[address.Address]bool),
}
- stats[d.Proposal.Client] = st
+ stats[d.deal.Proposal.Client] = st
}
- st.DataSize += int64(d.Proposal.PieceSize.Unpadded())
- st.cids[d.Proposal.PieceCID] = true
- st.providers[d.Proposal.Provider] = true
+ st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded())
+ st.cids[d.deal.Proposal.PieceCID] = true
+ st.providers[d.deal.Proposal.Provider] = true
st.NumDeals++
}
- out := make([]*clientStatsOutput, 0, len(stats))
- for _, cso := range stats {
- cso.NumCids = len(cso.cids)
- cso.NumMiners = len(cso.providers)
-
- out = append(out, cso)
+ out := clientStatsOutput{
+ Epoch: epoch,
+ Endpoint: "CLIENT_DEAL_STATS",
+ Payload: make([]*clientStats, 0, len(stats)),
+ }
+ for _, cs := range stats {
+ cs.NumCids = len(cs.cids)
+ cs.NumMiners = len(cs.providers)
+ out.Payload = append(out.Payload, cs)
}
if err := json.NewEncoder(w).Encode(out); err != nil {
@@ -221,6 +192,93 @@ func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *h
}
}
+type dealInfo struct {
+ deal api.MarketDeal
+ resolvedWallet address.Address
+}
+
+// filteredDealList returns the current epoch and a list of filtered deals.
+// On error it returns an epoch of 0 and a nil deal list.
+func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) {
+ ctx := context.Background()
+
+ head, err := dss.api.ChainHead(ctx)
+ if err != nil {
+ log.Warnf("failed to get chain head: %s", err)
+ return 0, nil
+ }
+
+ head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key())
+ if err != nil {
+ log.Warnf("failed to walk back %s epochs: %s", epochLookback, err)
+ return 0, nil
+ }
+
+ // Disabled as per @pooja's request
+ //
+ // // Exclude any address associated with a miner
+ // miners, err := dss.api.StateListMiners(ctx, head.Key())
+ // if err != nil {
+ // log.Warnf("failed to get miner list: %s", err)
+ // return 0, nil
+ // }
+ // for _, m := range miners {
+ // info, err := dss.api.StateMinerInfo(ctx, m, head.Key())
+ // if err != nil {
+ // log.Warnf("failed to get info for known miner '%s': %s", m, err)
+ // continue
+ // }
+
+ // knownFiltered.Store(info.Owner, true)
+ // knownFiltered.Store(info.Worker, true)
+ // for _, a := range info.ControlAddresses {
+ // knownFiltered.Store(a, true)
+ // }
+ // }
+
+ deals, err := dss.api.StateMarketDeals(ctx, head.Key())
+ if err != nil {
+ log.Warnf("failed to get market deals: %s", err)
+ return 0, nil
+ }
+
+ ret := make(map[string]dealInfo, len(deals))
+ for dealKey, d := range deals {
+
+ // Counting no-longer-active deals as per Pooja's request
+ // // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85
+ // if d.State.SectorStartEpoch < 0 {
+ // continue
+ // }
+
+ if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered {
+ continue
+ }
+
+ if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen {
+ w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key())
+ if err != nil {
+ log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err)
+ continue
+ } else {
+ resolvedWallets.Store(d.Proposal.Client, w)
+ }
+ }
+
+ w, _ := resolvedWallets.Load(d.Proposal.Client)
+ if _, isFiltered := knownFiltered.Load(w); isFiltered {
+ continue
+ }
+
+ ret[dealKey] = dealInfo{
+ deal: d,
+ resolvedWallet: w.(address.Address),
+ }
+ }
+
+ return int64(head.Height()), ret
+}
+
var serveDealStatsCmd = &cli.Command{
Name: "serve-deal-stats",
Flags: []cli.Flag{},
@@ -260,6 +318,8 @@ var serveDealStatsCmd = &cli.Command{
panic(err)
}
+ log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String())
+
return s.Serve(list)
},
}
diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go
index 01343c4a3..9cbff953b 100644
--- a/cmd/lotus-shed/import-car.go
+++ b/cmd/lotus-shed/import-car.go
@@ -1,10 +1,13 @@
package main
import (
+ "encoding/hex"
"fmt"
"io"
"os"
+ block "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-cid"
"github.com/ipld/go-car"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@@ -81,3 +84,57 @@ var importCarCmd = &cli.Command{
}
},
}
+
+var importObjectCmd = &cli.Command{
+ Name: "import-obj",
+ Usage: "import a raw ipld object into your datastore",
+ Action: func(cctx *cli.Context) error {
+ r, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return err
+ }
+ defer lr.Close() //nolint:errcheck
+
+ ds, err := lr.Datastore("/chain")
+ if err != nil {
+ return err
+ }
+
+ bs := blockstore.NewBlockstore(ds)
+
+ c, err := cid.Decode(cctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+
+ data, err := hex.DecodeString(cctx.Args().Get(1))
+ if err != nil {
+ return err
+ }
+
+ blk, err := block.NewBlockWithCid(data, c)
+ if err != nil {
+ return err
+ }
+
+ if err := bs.Put(blk); err != nil {
+ return err
+ }
+
+ return nil
+
+ },
+}
diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go
index 61e6967d6..409d8928b 100644
--- a/cmd/lotus-shed/main.go
+++ b/cmd/lotus-shed/main.go
@@ -25,6 +25,7 @@ func main() {
staterootCmd,
auditsCmd,
importCarCmd,
+ importObjectCmd,
commpToCidCmd,
fetchParamCmd,
proofsCmd,
@@ -37,6 +38,7 @@ func main() {
exportChainCmd,
consensusCmd,
serveDealStatsCmd,
+ syncCmd,
datastoreCmd,
}
diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go
new file mode 100644
index 000000000..bfe7cc8b7
--- /dev/null
+++ b/cmd/lotus-shed/sync.go
@@ -0,0 +1,64 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/urfave/cli/v2"
+)
+
+var syncCmd = &cli.Command{
+ Name: "sync",
+ Usage: "tools for diagnosing sync issues",
+ Flags: []cli.Flag{},
+ Subcommands: []*cli.Command{
+ syncValidateCmd,
+ },
+}
+
+var syncValidateCmd = &cli.Command{
+ Name: "validate",
+ Usage: "checks whether a provided tipset is valid",
+ Action: func(cctx *cli.Context) error {
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+
+ defer closer()
+ ctx := lcli.ReqContext(cctx)
+
+ if cctx.Args().Len() < 1 {
+ fmt.Println("usage: ...")
+ fmt.Println("At least one block cid must be provided")
+ return nil
+ }
+
+ args := cctx.Args().Slice()
+
+ var tscids []cid.Cid
+ for _, s := range args {
+ c, err := cid.Decode(s)
+ if err != nil {
+ return fmt.Errorf("block cid was invalid: %s", err)
+ }
+ tscids = append(tscids, c)
+ }
+
+ tsk := types.NewTipSetKey(tscids...)
+
+ valid, err := api.SyncValidateTipset(ctx, tsk)
+ if err != nil {
+ fmt.Println("Tipset is invalid: ", err)
+ }
+
+ if valid {
+ fmt.Println("Tipset is valid")
+ }
+
+ return nil
+ },
+}
diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go
index 3ccfd67da..213d62e6e 100644
--- a/cmd/lotus-storage-miner/info.go
+++ b/cmd/lotus-storage-miner/info.go
@@ -33,6 +33,12 @@ var infoCmd = &cli.Command{
Subcommands: []*cli.Command{
infoAllCmd,
},
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "hide-sectors-info",
+ Usage: "hide sectors info",
+ },
+ },
Action: infoCmdAct,
}
@@ -199,10 +205,12 @@ func infoCmdAct(cctx *cli.Context) error {
fmt.Printf("Expected Seal Duration: %s\n\n", sealdur)
- fmt.Println("Sectors:")
- err = sectorsInfo(ctx, nodeApi)
- if err != nil {
- return err
+ if !cctx.Bool("hide-sectors-info") {
+ fmt.Println("Sectors:")
+ err = sectorsInfo(ctx, nodeApi)
+ if err != nil {
+ return err
+ }
}
// TODO: grab actr state / info
diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go
index e0fee6564..a0f754a60 100644
--- a/cmd/lotus/daemon.go
+++ b/cmd/lotus/daemon.go
@@ -3,11 +3,14 @@
package main
import (
+ "bufio"
"context"
"encoding/hex"
"encoding/json"
"fmt"
+ "io"
"io/ioutil"
+ "net/http"
"os"
"runtime/pprof"
"strings"
@@ -23,6 +26,7 @@ import (
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
+ "gopkg.in/cheggaaa/pb.v1"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
@@ -100,11 +104,11 @@ var DaemonCmd = &cli.Command{
},
&cli.StringFlag{
Name: "import-chain",
- Usage: "on first run, load chain from given file and validate",
+ Usage: "on first run, load chain from given file or url and validate",
},
&cli.StringFlag{
Name: "import-snapshot",
- Usage: "import chain state from a given chain export file",
+ Usage: "import chain state from a given chain export file or url",
},
&cli.BoolFlag{
Name: "halt-after-import",
@@ -123,6 +127,10 @@ var DaemonCmd = &cli.Command{
Usage: "manage open file limit",
Value: true,
},
+ &cli.StringFlag{
+ Name: "config",
+ Usage: "specify path of config file to use",
+ },
},
Action: func(cctx *cli.Context) error {
err := runmetrics.Enable(runmetrics.RunMetricOptions{
@@ -176,6 +184,10 @@ var DaemonCmd = &cli.Command{
return xerrors.Errorf("opening fs repo: %w", err)
}
+ if cctx.String("config") != "" {
+ r.SetConfigPath(cctx.String("config"))
+ }
+
if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists {
return xerrors.Errorf("repo init error: %w", err)
}
@@ -206,11 +218,6 @@ var DaemonCmd = &cli.Command{
issnapshot = true
}
- chainfile, err := homedir.Expand(chainfile)
- if err != nil {
- return err
- }
-
if err := ImportChain(r, chainfile, issnapshot); err != nil {
return err
}
@@ -326,12 +333,42 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
return nil
}
-func ImportChain(r repo.Repo, fname string, snapshot bool) error {
- fi, err := os.Open(fname)
- if err != nil {
- return err
+func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
+ var rd io.Reader
+ var l int64
+ if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
+ resp, err := http.Get(fname) //nolint:gosec
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close() //nolint:errcheck
+
+ if resp.StatusCode != http.StatusOK {
+ return xerrors.Errorf("non-200 response: %d", resp.StatusCode)
+ }
+
+ rd = resp.Body
+ l = resp.ContentLength
+ } else {
+ fname, err = homedir.Expand(fname)
+ if err != nil {
+ return err
+ }
+
+ fi, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ defer fi.Close() //nolint:errcheck
+
+ st, err := os.Stat(fname)
+ if err != nil {
+ return err
+ }
+
+ rd = fi
+ l = st.Size()
}
- defer fi.Close() //nolint:errcheck
lr, err := r.Lock(repo.FullNode)
if err != nil {
@@ -353,8 +390,21 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) error {
cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
- log.Info("importing chain from file...")
- ts, err := cst.Import(fi)
+ log.Infof("importing chain from %s...", fname)
+
+ bufr := bufio.NewReaderSize(rd, 1<<20)
+
+ bar := pb.New64(l)
+ br := bar.NewProxyReader(bufr)
+ bar.ShowTimeLeft = true
+ bar.ShowPercent = true
+ bar.ShowSpeed = true
+ bar.Units = pb.U_BYTES
+
+ bar.Start()
+ ts, err := cst.Import(br)
+ bar.Finish()
+
if err != nil {
return xerrors.Errorf("importing chain failed: %w", err)
}
diff --git a/cmd/tvx/actor_mapping.go b/cmd/tvx/actor_mapping.go
new file mode 100644
index 000000000..8c306aca0
--- /dev/null
+++ b/cmd/tvx/actor_mapping.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+ "reflect"
+
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+ "github.com/ipfs/go-cid"
+ "github.com/multiformats/go-multihash"
+)
+
+var ActorMethodTable = make(map[string][]string, 64)
+
+var Actors = map[cid.Cid]interface{}{
+ builtin.InitActorCodeID: builtin.MethodsInit,
+ builtin.CronActorCodeID: builtin.MethodsCron,
+ builtin.AccountActorCodeID: builtin.MethodsAccount,
+ builtin.StoragePowerActorCodeID: builtin.MethodsPower,
+ builtin.StorageMinerActorCodeID: builtin.MethodsMiner,
+ builtin.StorageMarketActorCodeID: builtin.MethodsMarket,
+ builtin.PaymentChannelActorCodeID: builtin.MethodsPaych,
+ builtin.MultisigActorCodeID: builtin.MethodsMultisig,
+ builtin.RewardActorCodeID: builtin.MethodsReward,
+ builtin.VerifiedRegistryActorCodeID: builtin.MethodsVerifiedRegistry,
+}
+
+func init() {
+ for code, methods := range Actors {
+ cmh, err := multihash.Decode(code.Hash()) // identity hash.
+ if err != nil {
+ panic(err)
+ }
+
+ var (
+ aname = string(cmh.Digest)
+ rt = reflect.TypeOf(methods)
+ nf = rt.NumField()
+ )
+
+ ActorMethodTable[aname] = append(ActorMethodTable[aname], "Send")
+ for i := 0; i < nf; i++ {
+ ActorMethodTable[aname] = append(ActorMethodTable[aname], rt.Field(i).Name)
+ }
+ }
+}
diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go
new file mode 100644
index 000000000..9ec6f9e2b
--- /dev/null
+++ b/cmd/tvx/exec.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/fatih/color"
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/conformance"
+
+ "github.com/filecoin-project/test-vectors/schema"
+)
+
+var execFlags struct {
+ file string
+}
+
+var execCmd = &cli.Command{
+ Name: "exec",
+ Description: "execute one or many test vectors against Lotus; supplied as a single JSON file, or an ndjson stdin stream",
+ Action: runExecLotus,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "file",
+ Usage: "input file; if not supplied, the vector will be read from stdin",
+ TakesFile: true,
+ Destination: &execFlags.file,
+ },
+ },
+}
+
+func runExecLotus(_ *cli.Context) error {
+ if file := execFlags.file; file != "" {
+ // we have a single test vector supplied as a file.
+ file, err := os.Open(file)
+ if err != nil {
+ return fmt.Errorf("failed to open test vector: %w", err)
+ }
+
+ var (
+ dec = json.NewDecoder(file)
+ tv schema.TestVector
+ )
+
+ if err = dec.Decode(&tv); err != nil {
+ return fmt.Errorf("failed to decode test vector: %w", err)
+ }
+
+ return executeTestVector(tv)
+ }
+
+ for dec := json.NewDecoder(os.Stdin); ; {
+ var tv schema.TestVector
+ switch err := dec.Decode(&tv); err {
+ case nil:
+ if err = executeTestVector(tv); err != nil {
+ return err
+ }
+ case io.EOF:
+ // we're done.
+ return nil
+ default:
+ // something bad happened.
+ return err
+ }
+ }
+}
+
+func executeTestVector(tv schema.TestVector) error {
+ log.Println("executing test vector:", tv.Meta.ID)
+ r := new(conformance.LogReporter)
+ switch class := tv.Class; class {
+ case "message":
+ conformance.ExecuteMessageVector(r, &tv)
+ case "tipset":
+ conformance.ExecuteTipsetVector(r, &tv)
+ default:
+ return fmt.Errorf("test vector class %s not supported", class)
+ }
+
+ if r.Failed() {
+ log.Println(color.HiRedString("❌ test vector failed"))
+ } else {
+ log.Println(color.GreenString("✅ test vector succeeded"))
+ }
+
+ return nil
+}
diff --git a/cmd/tvx/extract.go b/cmd/tvx/extract.go
new file mode 100644
index 000000000..fef245858
--- /dev/null
+++ b/cmd/tvx/extract.go
@@ -0,0 +1,503 @@
+package main
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+
+ "github.com/fatih/color"
+
+ "github.com/filecoin-project/lotus/api"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ lcli "github.com/filecoin-project/lotus/cli"
+ "github.com/filecoin-project/lotus/conformance"
+
+ "github.com/filecoin-project/specs-actors/actors/builtin"
+
+ "github.com/filecoin-project/test-vectors/schema"
+
+ "github.com/ipfs/go-cid"
+ "github.com/urfave/cli/v2"
+)
+
+const (
+ PrecursorSelectAll = "all"
+ PrecursorSelectSender = "sender"
+)
+
+type extractOpts struct {
+ id string
+ block string
+ class string
+ cid string
+ file string
+ retain string
+ precursor string
+}
+
+var extractFlags extractOpts
+
+var extractCmd = &cli.Command{
+ Name: "extract",
+ Description: "generate a test vector by extracting it from a live chain",
+ Action: runExtract,
+ Flags: []cli.Flag{
+ &repoFlag,
+ &cli.StringFlag{
+ Name: "class",
+ Usage: "class of vector to extract; other required flags depend on the class; values: 'message'",
+ Value: "message",
+ Destination: &extractFlags.class,
+ },
+ &cli.StringFlag{
+ Name: "id",
+ Usage: "identifier to name this test vector with",
+ Value: "(undefined)",
+ Destination: &extractFlags.id,
+ },
+ &cli.StringFlag{
+ Name: "block",
+ Usage: "optionally, the block CID the message was included in, to avoid expensive chain scanning",
+ Destination: &extractFlags.block,
+ },
+ &cli.StringFlag{
+ Name: "cid",
+ Usage: "message CID to generate test vector from",
+ Required: true,
+ Destination: &extractFlags.cid,
+ },
+ &cli.StringFlag{
+ Name: "out",
+ Aliases: []string{"o"},
+ Usage: "file to write test vector to",
+ Destination: &extractFlags.file,
+ },
+ &cli.StringFlag{
+ Name: "state-retain",
+ Usage: "state retention policy; values: 'accessed-cids', 'accessed-actors'",
+ Value: "accessed-cids",
+ Destination: &extractFlags.retain,
+ },
+ &cli.StringFlag{
+ Name: "precursor-select",
+ Usage: "precursors to apply; values: 'all', 'sender'; 'all' selects all preceding " +
+ "messages in the canonicalised tipset, 'sender' selects only preceding messages from the same " +
+ "sender. Usually, 'sender' is a good tradeoff and gives you sufficient accuracy. If the receipt sanity " +
+ "check fails due to gas reasons, switch to 'all', as previous messages in the tipset may have " +
+ "affected state in a disruptive way",
+ Value: "sender",
+ Destination: &extractFlags.precursor,
+ },
+ },
+}
+
+func runExtract(c *cli.Context) error {
+ // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering",
+ // which stashes write operations in a BufferedBlockstore
+ // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21)
+ // such that they're not written until the VM is actually flushed.
+ //
+ // For some reason, the standard behaviour was not working for me (raulk),
+ // and disabling it (such that the state transformations are written immediately
+ // to the blockstore) worked.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
+
+ ctx := context.Background()
+
+ // Make the API client.
+ fapi, closer, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ return doExtract(ctx, fapi, extractFlags)
+}
+
+func doExtract(ctx context.Context, fapi api.FullNode, opts extractOpts) error {
+ mcid, err := cid.Decode(opts.cid)
+ if err != nil {
+ return err
+ }
+
+ msg, execTs, incTs, err := resolveFromChain(ctx, fapi, mcid, opts.block)
+ if err != nil {
+ return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err)
+ }
+
+ // get the circulating supply before the message was executed.
+ circSupplyDetail, err := fapi.StateCirculatingSupply(ctx, incTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed while fetching circulating supply: %w", err)
+ }
+
+ circSupply := circSupplyDetail.FilCirculating
+
+ log.Printf("message was executed in tipset: %s", execTs.Key())
+ log.Printf("message was included in tipset: %s", incTs.Key())
+ log.Printf("circulating supply at inclusion tipset: %d", circSupply)
+ log.Printf("finding precursor messages using mode: %s", opts.precursor)
+
+ // Fetch messages in canonical order from inclusion tipset.
+ msgs, err := fapi.ChainGetParentMessages(ctx, execTs.Blocks()[0].Cid())
+ if err != nil {
+ return fmt.Errorf("failed to fetch messages in canonical order from inclusion tipset: %w", err)
+ }
+
+ related, found, err := findMsgAndPrecursors(opts.precursor, msg, msgs)
+ if err != nil {
+ return fmt.Errorf("failed while finding message and precursors: %w", err)
+ }
+
+ if !found {
+ return fmt.Errorf("message not found; precursors found: %d", len(related))
+ }
+
+ var (
+ precursors = related[:len(related)-1]
+ precursorsCids []cid.Cid
+ )
+
+ for _, p := range precursors {
+ precursorsCids = append(precursorsCids, p.Cid())
+ }
+
+ log.Println(color.GreenString("found message; precursors (count: %d): %v", len(precursors), precursorsCids))
+
+ var (
+ // create a read-through store that uses ChainGetObject to fetch unknown CIDs.
+ pst = NewProxyingStores(ctx, fapi)
+ g = NewSurgeon(ctx, fapi, pst)
+ )
+
+ driver := conformance.NewDriver(ctx, schema.Selector{}, conformance.DriverOpts{
+ DisableVMFlush: true,
+ })
+
+ // this is the root of the state tree we start with.
+ root := incTs.ParentState()
+ log.Printf("base state tree root CID: %s", root)
+
+ basefee := incTs.Blocks()[0].ParentBaseFee
+ log.Printf("basefee: %s", basefee)
+
+ // on top of that state tree, we apply all precursors.
+ log.Printf("number of precursors to apply: %d", len(precursors))
+ for i, m := range precursors {
+ log.Printf("applying precursor %d, cid: %s", i, m.Cid())
+ _, root, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: root,
+ Epoch: execTs.Height(),
+ Message: m,
+ CircSupply: &circSupplyDetail.FilCirculating,
+ BaseFee: &basefee,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute precursor message: %w", err)
+ }
+ }
+
+ var (
+ preroot cid.Cid
+ postroot cid.Cid
+ applyret *vm.ApplyRet
+ carWriter func(w io.Writer) error
+ retention = opts.retain
+ )
+
+ log.Printf("using state retention strategy: %s", retention)
+ switch retention {
+ case "accessed-cids":
+ tbs, ok := pst.Blockstore.(TracingBlockstore)
+ if !ok {
+ return fmt.Errorf("requested 'accessed-cids' state retention, but no tracing blockstore was present")
+ }
+
+ tbs.StartTracing()
+
+ preroot = root
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: &circSupplyDetail.FilCirculating,
+ BaseFee: &basefee,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ accessed := tbs.FinishTracing()
+ carWriter = func(w io.Writer) error {
+ return g.WriteCARIncluding(w, accessed, preroot, postroot)
+ }
+
+ case "accessed-actors":
+ log.Printf("calculating accessed actors")
+ // get actors accessed by message.
+ retain, err := g.GetAccessedActors(ctx, fapi, mcid)
+ if err != nil {
+ return fmt.Errorf("failed to calculate accessed actors: %w", err)
+ }
+ // also append the reward actor and the burnt funds actor.
+ retain = append(retain, reward.Address, builtin.BurntFundsActorAddr, init_.Address)
+ log.Printf("calculated accessed actors: %v", retain)
+
+ // get the masked state tree from the root,
+ preroot, err = g.GetMaskedStateTree(root, retain)
+ if err != nil {
+ return err
+ }
+ applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{
+ Preroot: preroot,
+ Epoch: execTs.Height(),
+ Message: msg,
+ CircSupply: &circSupplyDetail.FilCirculating,
+ BaseFee: &basefee,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to execute message: %w", err)
+ }
+ carWriter = func(w io.Writer) error {
+ return g.WriteCAR(w, preroot, postroot)
+ }
+
+ default:
+ return fmt.Errorf("unknown state retention option: %s", retention)
+ }
+
+ log.Printf("message applied; preroot: %s, postroot: %s", preroot, postroot)
+ log.Println("performing sanity check on receipt")
+
+ // TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯
+ // ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2
+ // This code is lenient and skips receipt comparison in case of a nil receipt.
+ rec, err := fapi.StateGetReceipt(ctx, mcid, execTs.Key())
+ if err != nil {
+ return fmt.Errorf("failed to find receipt on chain: %w", err)
+ }
+ log.Printf("found receipt: %+v", rec)
+
+ // generate the schema receipt; if we got a nil receipt back, fall through to the lenient path below.
+ var receipt *schema.Receipt
+ if rec != nil {
+ receipt = &schema.Receipt{
+ ExitCode: int64(rec.ExitCode),
+ ReturnValue: rec.Return,
+ GasUsed: rec.GasUsed,
+ }
+ reporter := new(conformance.LogReporter)
+ conformance.AssertMsgResult(reporter, receipt, applyret, "as locally executed")
+ if reporter.Failed() {
+ log.Println(color.RedString("receipt sanity check failed; aborting"))
+ return fmt.Errorf("vector generation aborted")
+ }
+ log.Println(color.GreenString("receipt sanity check succeeded"))
+ } else {
+ receipt = &schema.Receipt{
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ }
+ log.Println(color.YellowString("skipping receipts comparison; we got back a nil receipt from lotus"))
+ }
+
+ log.Println("generating vector")
+ msgBytes, err := msg.Serialize()
+ if err != nil {
+ return err
+ }
+
+ var (
+ out = new(bytes.Buffer)
+ gw = gzip.NewWriter(out)
+ )
+ if err := carWriter(gw); err != nil {
+ return err
+ }
+ if err = gw.Flush(); err != nil {
+ return err
+ }
+ if err = gw.Close(); err != nil {
+ return err
+ }
+
+ version, err := fapi.Version(ctx)
+ if err != nil {
+ return err
+ }
+
+ ntwkName, err := fapi.StateNetworkName(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Write out the test vector.
+ vector := schema.TestVector{
+ Class: schema.ClassMessage,
+ Meta: &schema.Metadata{
+ ID: opts.id,
+ // TODO need to replace schema.GenerationData with a more flexible
+ // data structure that makes no assumption about the traceability
+ // data that's being recorded; a flexible map[string]string
+ // would do.
+ Gen: []schema.GenerationData{
+ {Source: fmt.Sprintf("network:%s", ntwkName)},
+ {Source: fmt.Sprintf("message:%s", msg.Cid().String())},
+ {Source: fmt.Sprintf("inclusion_tipset:%s", incTs.Key().String())},
+ {Source: fmt.Sprintf("execution_tipset:%s", execTs.Key().String())},
+ {Source: "github.com/filecoin-project/lotus", Version: version.String()}},
+ },
+ CAR: out.Bytes(),
+ Pre: &schema.Preconditions{
+ Epoch: int64(execTs.Height()),
+ CircSupply: circSupply.Int,
+ BaseFee: basefee.Int,
+ StateTree: &schema.StateTree{
+ RootCID: preroot,
+ },
+ },
+ ApplyMessages: []schema.Message{{Bytes: msgBytes}},
+ Post: &schema.Postconditions{
+ StateTree: &schema.StateTree{
+ RootCID: postroot,
+ },
+ Receipts: []*schema.Receipt{
+ {
+ ExitCode: int64(applyret.ExitCode),
+ ReturnValue: applyret.Return,
+ GasUsed: applyret.GasUsed,
+ },
+ },
+ },
+ }
+
+ output := io.WriteCloser(os.Stdout)
+ if file := opts.file; file != "" {
+ dir := filepath.Dir(file)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return fmt.Errorf("unable to create directory %s: %w", dir, err)
+ }
+ output, err = os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer output.Close() //nolint:errcheck
+ defer log.Printf("wrote test vector to file: %s", file)
+ }
+
+ enc := json.NewEncoder(output)
+ enc.SetIndent("", " ")
+ if err := enc.Encode(&vector); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// resolveFromChain queries the chain for the provided message, using the block CID to
+// speed up the query, if provided
+func resolveFromChain(ctx context.Context, api api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
+ // Extract the full message.
+ msg, err = api.ChainGetMessage(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("found message with CID %s: %+v", mcid, msg)
+
+ if block == "" {
+ log.Printf("locating message in blockchain")
+
+ // Locate the message.
+ msgInfo, err := api.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
+ }
+
+ log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)
+
+ execTs, incTs, err = fetchThisAndPrevTipset(ctx, api, msgInfo.TipSet)
+ return msg, execTs, incTs, err
+ }
+
+ bcid, err := cid.Decode(block)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ log.Printf("message inclusion block CID was provided; scanning around it: %s", bcid)
+
+ blk, err := api.ChainGetBlock(ctx, bcid)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get block: %w", err)
+ }
+
+ // types.EmptyTSK hints to use the HEAD.
+ execTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height+1, types.EmptyTSK)
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message execution tipset: %w", err)
+ }
+
+ // walk back from the execTs instead of HEAD, to save time.
+ incTs, err = api.ChainGetTipSetByHeight(ctx, blk.Height, execTs.Key())
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to get message inclusion tipset: %w", err)
+ }
+
+ return msg, execTs, incTs, nil
+}
+
+// fetchThisAndPrevTipset returns the full tipset identified by the key, as well
+// as the previous tipset. In the context of vector generation, the target
+// tipset is the one where a message was executed, and the previous tipset is
+// the one where the message was included.
+func fetchThisAndPrevTipset(ctx context.Context, api api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
+ // get the tipset on which this message was "executed" on.
+ // https://github.com/filecoin-project/lotus/issues/2847
+ targetTs, err = api.ChainGetTipSet(ctx, target)
+ if err != nil {
+ return nil, nil, err
+ }
+ // get the previous tipset, on which this message was mined,
+ // i.e. included on-chain.
+ prevTs, err = api.ChainGetTipSet(ctx, targetTs.Parents())
+ if err != nil {
+ return nil, nil, err
+ }
+ return targetTs, prevTs, nil
+}
+
+// findMsgAndPrecursors ranges through the canonical messages slice, locating
+// the target message and returning precursors in accordance to the supplied
+// mode.
+func findMsgAndPrecursors(mode string, target *types.Message, msgs []api.Message) (related []*types.Message, found bool, err error) {
+ // Range through canonicalised messages, selecting only the precursors based
+ // on selection mode.
+ for _, other := range msgs {
+ switch {
+ case mode == PrecursorSelectAll:
+ fallthrough
+ case mode == PrecursorSelectSender && other.Message.From == target.From:
+ related = append(related, other.Message)
+ }
+
+ // this message is the target; we're done.
+ if other.Cid == target.Cid() {
+ return related, true, nil
+ }
+ }
+
+ // this could happen because a block contained related messages, but not
+ // the target (that is, messages with a lower nonce, but ultimately not the
+ // target).
+ return related, false, nil
+}
diff --git a/cmd/tvx/extract_many.go b/cmd/tvx/extract_many.go
new file mode 100644
index 000000000..9679a1dbd
--- /dev/null
+++ b/cmd/tvx/extract_many.go
@@ -0,0 +1,232 @@
+package main
+
+import (
+ "context"
+ "encoding/csv"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/hashicorp/go-multierror"
+ "github.com/urfave/cli/v2"
+
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var extractManyFlags struct {
+ in string
+ outdir string
+ batchId string
+}
+
+var extractManyCmd = &cli.Command{
+ Name: "extract-many",
+ Description: `generate many test vectors by repeatedly calling tvx extract, using a csv file as input.
+
+ The CSV file must have a format just like the following:
+
+ message_cid,receiver_code,method_num,exit_code,height,block_cid,seq
+ bafy2bzacedvuvgpsnwq7i7kltfap6hnp7fdmzf6lr4w34zycjrthb3v7k6zi6,fil/1/account,0,0,67972,bafy2bzacebthpxzlk7zhlkz3jfzl4qw7mdoswcxlf3rkof3b4mbxfj3qzfk7w,1
+ bafy2bzacedwicofymn4imgny2hhbmcm4o5bikwnv3qqgohyx73fbtopiqlro6,fil/1/account,0,0,67860,bafy2bzacebj7beoxyzll522o6o76mt7von4psn3tlvunokhv4zhpwmfpipgti,2
+ ...
+
+ The first row MUST be a header row. At the bare minimum, those seven fields
+ must appear, in the order specified. Extra fields are accepted, but always
+ after these compulsory seven.
+`,
+ Action: runExtractMany,
+ Flags: []cli.Flag{
+ &repoFlag,
+ &cli.StringFlag{
+ Name: "batch-id",
+ Usage: "batch id; a four-digit left-zero-padded sequential number (e.g. 0041)",
+ Required: true,
+ Destination: &extractManyFlags.batchId,
+ },
+ &cli.StringFlag{
+ Name: "in",
+ Usage: "path to input file (csv)",
+ Destination: &extractManyFlags.in,
+ },
+ &cli.StringFlag{
+ Name: "outdir",
+ Usage: "output directory",
+ Destination: &extractManyFlags.outdir,
+ },
+ },
+}
+
+func runExtractMany(c *cli.Context) error {
+ // LOTUS_DISABLE_VM_BUF disables what's called "VM state tree buffering",
+ // which stashes write operations in a BufferedBlockstore
+ // (https://github.com/filecoin-project/lotus/blob/b7a4dbb07fd8332b4492313a617e3458f8003b2a/lib/bufbstore/buf_bstore.go#L21)
+ // such that they're not written until the VM is actually flushed.
+ //
+ // For some reason, the standard behaviour was not working for me (raulk),
+ // and disabling it (such that the state transformations are written immediately
+ // to the blockstore) worked.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
+
+ ctx := context.Background()
+
+ // Make the API client.
+ fapi, closer, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ var (
+ in = extractManyFlags.in
+ outdir = extractManyFlags.outdir
+ )
+
+ if in == "" {
+ return fmt.Errorf("input file not provided")
+ }
+
+ if outdir == "" {
+ return fmt.Errorf("output dir not provided")
+ }
+
+ // Open the CSV file for reading.
+ f, err := os.Open(in)
+ if err != nil {
+ return fmt.Errorf("could not open file %s: %w", in, err)
+ }
+
+ // Ensure the output directory exists.
+ if err := os.MkdirAll(outdir, 0755); err != nil {
+ return fmt.Errorf("could not create output dir %s: %w", outdir, err)
+ }
+
+ // Create a CSV reader and validate the header row.
+ reader := csv.NewReader(f)
+ if header, err := reader.Read(); err != nil {
+ return fmt.Errorf("failed to read header from csv: %w", err)
+ } else if l := len(header); l < 7 {
+ return fmt.Errorf("insufficient number of fields: %d", l)
+ } else if f := header[0]; f != "message_cid" {
+ return fmt.Errorf("csv sanity check failed: expected first field in header to be 'message_cid'; was: %s", f)
+ } else {
+ log.Println(color.GreenString("csv sanity check succeeded; header contains fields: %v", header))
+ }
+
+ var (
+ generated []string
+ merr = new(multierror.Error)
+ retry []extractOpts // to retry with 'canonical' precursor selection mode
+ )
+
+ // Read each row and extract the requested message.
+ for {
+ row, err := reader.Read()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return fmt.Errorf("failed to read row: %w", err)
+ }
+ var (
+ cid = row[0]
+ actorcode = row[1]
+ methodnumstr = row[2]
+ exitcodestr = row[3]
+ _ = row[4]
+ block = row[5]
+ seq = row[6]
+
+ exit int
+ methodnum int
+ methodname string
+ )
+
+ // Parse the exit code.
+ if exit, err = strconv.Atoi(exitcodestr); err != nil {
+ return fmt.Errorf("invalid exitcode number: %d", exit)
+ }
+ // Parse the method number.
+ if methodnum, err = strconv.Atoi(methodnumstr); err != nil {
+ return fmt.Errorf("invalid method number: %s", methodnumstr)
+ }
+
+ // Lookup the method in actor method table.
+ if m, ok := ActorMethodTable[actorcode]; !ok {
+ return fmt.Errorf("unrecognized actor: %s", actorcode)
+ } else if methodnum >= len(m) {
+ return fmt.Errorf("unrecognized method number for actor %s: %d", actorcode, methodnum)
+ } else {
+ methodname = m[methodnum]
+ }
+
+ // exitcode string representations are of kind ErrType(0); strip out
+ // the number portion.
+ exitcodename := strings.Split(exitcode.ExitCode(exit).String(), "(")[0]
+ // replace the slashes in the actor code name with underscores.
+ actorcodename := strings.ReplaceAll(actorcode, "/", "_")
+
+ // Compute the ID of the vector.
+ id := fmt.Sprintf("ext-%s-%s-%s-%s-%s", extractManyFlags.batchId, actorcodename, methodname, exitcodename, seq)
+ // Vector filename, using a base of outdir.
+ file := filepath.Join(outdir, actorcodename, methodname, exitcodename, id) + ".json"
+
+ log.Println(color.YellowString("processing message cid with 'sender' precursor mode: %s", id))
+
+ opts := extractOpts{
+ id: id,
+ block: block,
+ class: "message",
+ cid: cid,
+ file: file,
+ retain: "accessed-cids",
+ precursor: PrecursorSelectSender,
+ }
+
+ if err := doExtract(ctx, fapi, opts); err != nil {
+ log.Println(color.RedString("failed to extract vector for message %s: %s; queuing for 'canonical' precursor selection", cid, err))
+ retry = append(retry, opts)
+ continue
+ }
+
+ log.Println(color.MagentaString("generated file: %s", file))
+
+ generated = append(generated, file)
+ }
+
+ log.Printf("extractions to try with canonical precursor selection mode: %d", len(retry))
+
+ for _, r := range retry {
+ log.Printf("retrying %s: %s", r.cid, r.id)
+
+ r.precursor = PrecursorSelectAll
+ if err := doExtract(ctx, fapi, r); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("failed to extract vector for message %s: %w", r.cid, err))
+ continue
+ }
+
+ log.Println(color.MagentaString("generated file: %s", r.file))
+ generated = append(generated, r.file)
+ }
+
+ if len(generated) == 0 {
+ log.Println("no files generated")
+ } else {
+ log.Println("files generated:")
+ for _, g := range generated {
+ log.Println(g)
+ }
+ }
+
+ if merr.ErrorOrNil() != nil {
+ log.Println(color.YellowString("done processing with errors: %v", merr))
+ } else {
+ log.Println(color.GreenString("done processing with no errors"))
+ }
+
+ return merr.ErrorOrNil()
+}
diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go
new file mode 100644
index 000000000..6c887d163
--- /dev/null
+++ b/cmd/tvx/main.go
@@ -0,0 +1,71 @@
+package main
+
+import (
+ "log"
+ "os"
+ "sort"
+
+ "github.com/urfave/cli/v2"
+)
+
+// DefaultLotusRepoPath is the fallback path in which to look for a Lotus
+// client repo. It is expanded with mitchellh/go-homedir, so it'll work with all
+// OSes despite the Unix twiddle notation.
+const DefaultLotusRepoPath = "~/.lotus"
+
+var repoFlag = cli.StringFlag{
+ Name: "repo",
+ EnvVars: []string{"LOTUS_PATH"},
+ Value: DefaultLotusRepoPath,
+ TakesFile: true,
+}
+
+func main() {
+ app := &cli.App{
+ Name: "tvx",
+ Description: `tvx is a tool for extracting and executing test vectors. It has three subcommands.
+
+ tvx extract extracts a test vector from a live network. It requires access to
+ a Filecoin client that exposes the standard JSON-RPC API endpoint. Only
+ message class test vectors are supported at this time.
+
+ tvx exec executes test vectors against Lotus. Either you can supply one in a
+ file, or many as an ndjson stdin stream.
+
+ tvx extract-many performs a batch extraction of many messages, supplied in a
+ CSV file. Refer to the help of that subcommand for more info.
+
+ SETTING THE JSON-RPC API ENDPOINT
+
+ You can set the JSON-RPC API endpoint through one of the following methods.
+
+ 1. Directly set the API endpoint on the FULLNODE_API_INFO env variable.
+ The format is [token]:multiaddr, where token is optional for commands not
+ accessing privileged operations.
+
+ 2. If you're running tvx against a local Lotus client, you can set the REPO
+ env variable to have the API endpoint and token extracted from the repo.
+ Alternatively, you can pass the --repo CLI flag.
+
+ 3. Rely on the default fallback, which inspects ~/.lotus and extracts the
+ API endpoint string if the location is a Lotus repo.
+
+ tvx will apply these methods in the same order of precedence they're listed.
+`,
+ Usage: "tvx is a tool for extracting and executing test vectors",
+ Commands: []*cli.Command{
+ extractCmd,
+ execCmd,
+ extractManyCmd,
+ },
+ }
+
+ sort.Sort(cli.CommandsByName(app.Commands))
+ for _, c := range app.Commands {
+ sort.Sort(cli.FlagsByName(c.Flags))
+ }
+
+ if err := app.Run(os.Args); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/cmd/tvx/state.go b/cmd/tvx/state.go
new file mode 100644
index 000000000..bff5cbd6e
--- /dev/null
+++ b/cmd/tvx/state.go
@@ -0,0 +1,293 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/ipfs/go-cid"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipld/go-car"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/api"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/state"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+// StateSurgeon is an object used to fetch and manipulate state.
+type StateSurgeon struct {
+ ctx context.Context
+ api api.FullNode
+ stores *Stores
+}
+
+// NewSurgeon returns a state surgeon, an object used to fetch and manipulate
+// state.
+func NewSurgeon(ctx context.Context, api api.FullNode, stores *Stores) *StateSurgeon {
+ return &StateSurgeon{
+ ctx: ctx,
+ api: api,
+ stores: stores,
+ }
+}
+
+// GetMaskedStateTree trims the state tree at the supplied tipset to contain
+// only the state of the actors in the retain set. It also "dives" into some
+// singleton system actors, like the init actor, to trim the state so as to
+// compute a minimal state tree. In the future, this method will dive into
+// other system actors like the power actor and the market actor.
+func (sg *StateSurgeon) GetMaskedStateTree(previousRoot cid.Cid, retain []address.Address) (cid.Cid, error) {
+ // TODO: this will need to be parameterized on network version.
+ st, err := state.LoadStateTree(sg.stores.CBORStore, previousRoot)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ initActor, initState, err := sg.loadInitActor(st)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ err = sg.retainInitEntries(initState, retain)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ err = sg.saveInitActor(initActor, initState, st)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ // resolve all addresses to ID addresses.
+ resolved, err := sg.resolveAddresses(retain, initState)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ st, err = sg.transplantActors(st, resolved)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ root, err := st.Flush(sg.ctx)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ return root, nil
+}
+
+// GetAccessedActors identifies the actors that were accessed during the
+// execution of a message.
+func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a api.FullNode, mid cid.Cid) ([]address.Address, error) {
+ log.Printf("calculating accessed actors during execution of message: %s", mid)
+ msgInfo, err := a.StateSearchMsg(ctx, mid)
+ if err != nil {
+ return nil, err
+ }
+ if msgInfo == nil {
+ return nil, fmt.Errorf("message info is nil")
+ }
+
+ msgObj, err := a.ChainGetMessage(ctx, mid)
+ if err != nil {
+ return nil, err
+ }
+
+ ts, err := a.ChainGetTipSet(ctx, msgInfo.TipSet)
+ if err != nil {
+ return nil, err
+ }
+
+ trace, err := a.StateCall(ctx, msgObj, ts.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("could not replay msg: %w", err)
+ }
+
+ accessed := make(map[address.Address]struct{})
+
+ var recur func(trace *types.ExecutionTrace)
+ recur = func(trace *types.ExecutionTrace) {
+ accessed[trace.Msg.To] = struct{}{}
+ accessed[trace.Msg.From] = struct{}{}
+ for i := range trace.Subcalls {
+ recur(&trace.Subcalls[i])
+ }
+ }
+ recur(&trace.ExecutionTrace)
+
+ ret := make([]address.Address, 0, len(accessed))
+ for k := range accessed {
+ ret = append(ret, k)
+ }
+
+ return ret, nil
+}
+
+// WriteCAR recursively writes the tree referenced by the root as a CAR into the
+// supplied io.Writer.
+func (sg *StateSurgeon) WriteCAR(w io.Writer, roots ...cid.Cid) error {
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+ }
+ return car.WriteCarWithWalker(sg.ctx, sg.stores.DAGService, roots, w, carWalkFn)
+}
+
+// WriteCARIncluding writes a CAR including only the CIDs that are listed in
+// the include set. This leads to an intentionally sparse tree with dangling links.
+func (sg *StateSurgeon) WriteCARIncluding(w io.Writer, include map[cid.Cid]struct{}, roots ...cid.Cid) error {
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if _, ok := include[link.Cid]; !ok {
+ continue
+ }
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+ }
+ return car.WriteCarWithWalker(sg.ctx, sg.stores.DAGService, roots, w, carWalkFn)
+}
+
+// transplantActors plucks the state from the supplied actors at the given
+// tipset, and places it into the supplied state map.
+func (sg *StateSurgeon) transplantActors(src *state.StateTree, pluck []address.Address) (*state.StateTree, error) {
+ log.Printf("transplanting actor states: %v", pluck)
+
+ dst, err := state.NewStateTree(sg.stores.CBORStore, src.Version())
+ if err != nil {
+ return nil, err
+ }
+
+ for _, a := range pluck {
+ actor, err := src.GetActor(a)
+ if err != nil {
+ return nil, fmt.Errorf("get actor %s failed: %w", a, err)
+ }
+
+ err = dst.SetActor(a, actor)
+ if err != nil {
+ return nil, err
+ }
+
+ // recursive copy of the actor state.
+ err = vm.Copy(context.TODO(), sg.stores.Blockstore, sg.stores.Blockstore, actor.Head)
+ if err != nil {
+ return nil, err
+ }
+
+ actorState, err := sg.api.ChainReadObj(sg.ctx, actor.Head)
+ if err != nil {
+ return nil, err
+ }
+
+ cid, err := sg.stores.CBORStore.Put(sg.ctx, &cbg.Deferred{Raw: actorState})
+ if err != nil {
+ return nil, err
+ }
+
+ if cid != actor.Head {
+ panic("mismatched cids")
+ }
+ }
+
+ return dst, nil
+}
+
+// saveInitActor saves the state of the init actor to the provided state map.
+func (sg *StateSurgeon) saveInitActor(initActor *types.Actor, initState init_.State, st *state.StateTree) error {
+ log.Printf("saving init actor into state tree")
+
+ // Store the state of the init actor.
+ cid, err := sg.stores.CBORStore.Put(sg.ctx, initState)
+ if err != nil {
+ return err
+ }
+ actor := *initActor
+ actor.Head = cid
+
+ err = st.SetActor(init_.Address, &actor)
+ if err != nil {
+ return err
+ }
+
+ cid, _ = st.Flush(sg.ctx)
+ log.Printf("saved init actor into state tree; new root: %s", cid)
+ return nil
+}
+
+// retainInitEntries takes an old init actor state, and retains only the
+// entries in the retain set, returning a new init actor state.
+func (sg *StateSurgeon) retainInitEntries(state init_.State, retain []address.Address) error {
+ log.Printf("retaining init actor entries for addresses: %v", retain)
+
+ m := make(map[address.Address]struct{}, len(retain))
+ for _, a := range retain {
+ m[a] = struct{}{}
+ }
+
+ var remove []address.Address
+ _ = state.ForEachActor(func(id abi.ActorID, address address.Address) error {
+ if _, ok := m[address]; !ok {
+ remove = append(remove, address)
+ }
+ return nil
+ })
+
+ err := state.Remove(remove...)
+ log.Printf("new init actor state: %+v", state)
+ return err
+}
+
+// resolveAddresses resolves the requested addresses from the provided
+// InitActor state, returning a slice of length len(orig), where each index
+// contains the resolved address.
+func (sg *StateSurgeon) resolveAddresses(orig []address.Address, ist init_.State) (ret []address.Address, err error) {
+ log.Printf("resolving addresses: %v", orig)
+
+ ret = make([]address.Address, len(orig))
+ for i, addr := range orig {
+ resolved, found, err := ist.ResolveAddress(addr)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return nil, fmt.Errorf("address not found: %s", addr)
+ }
+ ret[i] = resolved
+ }
+
+ log.Printf("resolved addresses: %v", ret)
+ return ret, nil
+}
+
+// loadInitActor loads the init actor state from a given tipset.
+func (sg *StateSurgeon) loadInitActor(st *state.StateTree) (*types.Actor, init_.State, error) {
+ actor, err := st.GetActor(init_.Address)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ initState, err := init_.Load(sg.stores.ADTStore, actor)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ log.Printf("loaded init actor state: %+v", initState)
+
+ return actor, initState, nil
+}
diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go
new file mode 100644
index 000000000..93e0d215f
--- /dev/null
+++ b/cmd/tvx/stores.go
@@ -0,0 +1,142 @@
+package main
+
+import (
+ "context"
+ "log"
+ "sync"
+
+ "github.com/fatih/color"
+ dssync "github.com/ipfs/go-datastore/sync"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+
+ blocks "github.com/ipfs/go-block-format"
+ "github.com/ipfs/go-blockservice"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ exchange "github.com/ipfs/go-ipfs-exchange-interface"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-merkledag"
+)
+
+// Stores is a collection of the different stores and services that are needed
+// to deal with the data layer of Filecoin, conveniently interlinked with one
+// another.
+type Stores struct {
+ CBORStore cbor.IpldStore
+ ADTStore adt.Store
+ Datastore ds.Batching
+ Blockstore blockstore.Blockstore
+ BlockService blockservice.BlockService
+ Exchange exchange.Interface
+ DAGService format.DAGService
+}
+
+// NewProxyingStores is a set of Stores backed by a proxying Blockstore that
+// proxies Get requests for unknown CIDs to a Filecoin node, via the
+// ChainReadObj RPC.
+func NewProxyingStores(ctx context.Context, api api.FullNode) *Stores {
+ ds := dssync.MutexWrap(ds.NewMapDatastore())
+ bs := &proxyingBlockstore{
+ ctx: ctx,
+ api: api,
+ Blockstore: blockstore.NewBlockstore(ds),
+ }
+ return NewStores(ctx, ds, bs)
+}
+
+// NewStores creates a non-proxying set of Stores.
+func NewStores(ctx context.Context, ds ds.Batching, bs blockstore.Blockstore) *Stores {
+ var (
+ cborstore = cbor.NewCborStore(bs)
+ offl = offline.Exchange(bs)
+ blkserv = blockservice.New(bs, offl)
+ dserv = merkledag.NewDAGService(blkserv)
+ )
+
+ return &Stores{
+ CBORStore: cborstore,
+ ADTStore: adt.WrapStore(ctx, cborstore),
+ Datastore: ds,
+ Blockstore: bs,
+ Exchange: offl,
+ BlockService: blkserv,
+ DAGService: dserv,
+ }
+}
+
+// TracingBlockstore is a Blockstore trait that records CIDs that were accessed
+// through Get.
+type TracingBlockstore interface {
+	// StartTracing starts tracing CIDs accessed through this Blockstore.
+ StartTracing()
+
+ // FinishTracing finishes tracing accessed CIDs, and returns a map of the
+ // CIDs that were traced.
+ FinishTracing() map[cid.Cid]struct{}
+}
+
+// proxyingBlockstore is a Blockstore wrapper that fetches unknown CIDs from
+// a Filecoin node via JSON-RPC.
+type proxyingBlockstore struct {
+ ctx context.Context
+ api api.FullNode
+
+ lk sync.RWMutex
+ tracing bool
+ traced map[cid.Cid]struct{}
+
+ blockstore.Blockstore
+}
+
+var _ TracingBlockstore = (*proxyingBlockstore)(nil)
+
+func (pb *proxyingBlockstore) StartTracing() {
+ pb.lk.Lock()
+ pb.tracing = true
+ pb.traced = map[cid.Cid]struct{}{}
+ pb.lk.Unlock()
+}
+
+func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} {
+ pb.lk.Lock()
+ ret := pb.traced
+ pb.tracing = false
+ pb.traced = map[cid.Cid]struct{}{}
+ pb.lk.Unlock()
+ return ret
+}
+
+func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
+ pb.lk.RLock()
+ if pb.tracing {
+ pb.traced[cid] = struct{}{}
+ }
+ pb.lk.RUnlock()
+
+ if block, err := pb.Blockstore.Get(cid); err == nil {
+ return block, err
+ }
+
+ log.Println(color.CyanString("fetching cid via rpc: %v", cid))
+ item, err := pb.api.ChainReadObj(pb.ctx, cid)
+ if err != nil {
+ return nil, err
+ }
+ block, err := blocks.NewBlockWithCid(item, cid)
+ if err != nil {
+ return nil, err
+ }
+
+ err = pb.Blockstore.Put(block)
+ if err != nil {
+ return nil, err
+ }
+
+ return block, nil
+}
diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go
new file mode 100644
index 000000000..3d447570d
--- /dev/null
+++ b/conformance/corpus_test.go
@@ -0,0 +1,133 @@
+package conformance
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/filecoin-project/test-vectors/schema"
+)
+
+const (
+ // EnvSkipConformance, if 1, skips the conformance test suite.
+ EnvSkipConformance = "SKIP_CONFORMANCE"
+
+ // EnvCorpusRootDir is the name of the environment variable where the path
+ // to an alternative corpus location can be provided.
+ //
+ // The default is defaultCorpusRoot.
+ EnvCorpusRootDir = "CORPUS_DIR"
+
+ // defaultCorpusRoot is the directory where the test vector corpus is hosted.
+ // It is mounted on the Lotus repo as a git submodule.
+ //
+	// When running this test, the corpus root can be overridden through the
+	// CORPUS_DIR env variable to run an alternate corpus.
+ defaultCorpusRoot = "../extern/test-vectors/corpus"
+)
+
+// ignore is a set of paths relative to root to skip.
+var ignore = map[string]struct{}{
+ ".git": {},
+ "schema.json": {},
+}
+
+// TestConformance is the entrypoint test that runs all test vectors found
+// in the corpus root directory.
+//
+// It locates all json files via a recursive walk, skipping over the ignore set,
+// as well as files beginning with _. It parses each file as a test vector, and
+// runs it via the Driver.
+func TestConformance(t *testing.T) {
+ if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" {
+ t.SkipNow()
+ }
+ // corpusRoot is the effective corpus root path, taken from the `-conformance.corpus` CLI flag,
+ // falling back to defaultCorpusRoot if not provided.
+ corpusRoot := defaultCorpusRoot
+ if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" {
+ corpusRoot = dir
+ }
+
+ var vectors []string
+ err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ filename := filepath.Base(path)
+ rel, err := filepath.Rel(corpusRoot, path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, ok := ignore[rel]; ok {
+ // skip over using the right error.
+ if info.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ if info.IsDir() {
+ // dive into directories.
+ return nil
+ }
+ if filepath.Ext(path) != ".json" {
+ // skip if not .json.
+ return nil
+ }
+ if ignored := strings.HasPrefix(filename, "_"); ignored {
+ // ignore files starting with _.
+ t.Logf("ignoring: %s", rel)
+ return nil
+ }
+ vectors = append(vectors, rel)
+ return nil
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(vectors) == 0 {
+ t.Fatalf("no test vectors found")
+ }
+
+ // Run a test for each vector.
+ for _, v := range vectors {
+ path := filepath.Join(corpusRoot, v)
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+ t.Fatalf("failed to read test raw file: %s", path)
+ }
+
+ var vector schema.TestVector
+ err = json.Unmarshal(raw, &vector)
+ if err != nil {
+ t.Errorf("failed to parse test vector %s: %s; skipping", path, err)
+ continue
+ }
+
+ t.Run(v, func(t *testing.T) {
+ for _, h := range vector.Hints {
+ if h == schema.HintIncorrect {
+ t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID)
+ t.SkipNow()
+ }
+ }
+
+ // dispatch the execution depending on the vector class.
+ switch vector.Class {
+ case "message":
+ ExecuteMessageVector(t, &vector)
+ case "tipset":
+ ExecuteTipsetVector(t, &vector)
+ default:
+ t.Fatalf("test vector class not supported: %s", vector.Class)
+ }
+ })
+ }
+}
diff --git a/conformance/driver.go b/conformance/driver.go
index f43a8739d..9ced12d74 100644
--- a/conformance/driver.go
+++ b/conformance/driver.go
@@ -2,9 +2,9 @@ package conformance
import (
"context"
+ "os"
- "github.com/filecoin-project/go-state-types/crypto"
-
+ "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/test-vectors/schema"
@@ -24,18 +25,36 @@ import (
)
var (
- // BaseFee to use in the VM.
- // TODO make parametrisable through vector.
- BaseFee = abi.NewTokenAmount(100)
+ // DefaultCirculatingSupply is the fallback circulating supply returned by
+ // the driver's CircSupplyCalculator function, used if the vector specifies
+ // no circulating supply.
+ DefaultCirculatingSupply = types.TotalFilecoinInt
+
+ // DefaultBaseFee to use in the VM, if one is not supplied in the vector.
+ DefaultBaseFee = abi.NewTokenAmount(100)
)
type Driver struct {
ctx context.Context
selector schema.Selector
+ vmFlush bool
}
-func NewDriver(ctx context.Context, selector schema.Selector) *Driver {
- return &Driver{ctx: ctx, selector: selector}
+type DriverOpts struct {
+	// DisableVMFlush, when true, avoids calling VM.Flush(), which forces a
+	// recursive blockstore copy from the temporary buffer blockstore to the
+	// real system's blockstore. Disabling VM flushing is useful when
+	// extracting test vectors and trimming state, as we don't want to force
+	// an accidental deep copy of the state tree.
+ //
+ // Disabling VM flushing almost always should go hand-in-hand with
+ // LOTUS_DISABLE_VM_BUF=iknowitsabadidea. That way, state tree writes are
+ // immediately committed to the blockstore.
+ DisableVMFlush bool
+}
+
+func NewDriver(ctx context.Context, selector schema.Selector, opts DriverOpts) *Driver {
+ return &Driver{ctx: ctx, selector: selector, vmFlush: !opts.DisableVMFlush}
}
type ExecuteTipsetResult struct {
@@ -120,19 +139,47 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
return ret, nil
}
+type ExecuteMessageParams struct {
+ Preroot cid.Cid
+ Epoch abi.ChainEpoch
+ Message *types.Message
+ CircSupply *abi.TokenAmount
+ BaseFee *abi.TokenAmount
+}
+
// ExecuteMessage executes a conformance test vector message in a temporary VM.
-func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, preroot cid.Cid, epoch abi.ChainEpoch, msg *types.Message) (*vm.ApplyRet, cid.Cid, error) {
- // dummy state manager; only to reference the GetNetworkVersion method, which does not depend on state.
+func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageParams) (*vm.ApplyRet, cid.Cid, error) {
+ if !d.vmFlush {
+ // do not flush the VM, just the state tree; this should be used with
+ // LOTUS_DISABLE_VM_BUF enabled, so writes will anyway be visible.
+ _ = os.Setenv("LOTUS_DISABLE_VM_BUF", "iknowitsabadidea")
+ }
+
+ basefee := DefaultBaseFee
+ if params.BaseFee != nil {
+ basefee = *params.BaseFee
+ }
+
+ circSupply := DefaultCirculatingSupply
+ if params.CircSupply != nil {
+ circSupply = *params.CircSupply
+ }
+
+ // dummy state manager; only to reference the GetNetworkVersion method,
+ // which does not depend on state.
sm := new(stmgr.StateManager)
+
vmOpts := &vm.VMOpts{
- StateBase: preroot,
- Epoch: epoch,
- Rand: &testRand{}, // TODO always succeeds; need more flexibility.
- Bstore: bs,
- Syscalls: mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier)), // TODO always succeeds; need more flexibility.
- CircSupplyCalc: nil,
- BaseFee: BaseFee,
- NtwkVersion: sm.GetNtwkVersion,
+ StateBase: params.Preroot,
+ Epoch: params.Epoch,
+ Rand: &testRand{}, // TODO always succeeds; need more flexibility.
+ Bstore: bs,
+ Syscalls: mkFakedSigSyscalls(vm.Syscalls(ffiwrapper.ProofVerifier)), // TODO always succeeds; need more flexibility.
+ CircSupplyCalc: func(_ context.Context, _ abi.ChainEpoch, _ *state.StateTree) (abi.TokenAmount, error) {
+ return circSupply, nil
+ },
+ BaseFee: basefee,
+ NtwkVersion: sm.GetNtwkVersion,
}
lvm, err := vm.NewVM(context.TODO(), vmOpts)
@@ -149,12 +196,20 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, preroot cid.Cid, epoch
lvm.SetInvoker(invoker)
- ret, err := lvm.ApplyMessage(d.ctx, toChainMsg(msg))
+ ret, err := lvm.ApplyMessage(d.ctx, toChainMsg(params.Message))
if err != nil {
return nil, cid.Undef, err
}
- root, err := lvm.Flush(d.ctx)
+ var root cid.Cid
+ if d.vmFlush {
+		// flush the VM, committing the state tree changes and forcing a
+		// recursive copy from the temporary blockstore to the real blockstore.
+ root, err = lvm.Flush(d.ctx)
+ } else {
+ root, err = lvm.StateTree().(*state.StateTree).Flush(d.ctx)
+ }
+
return ret, root, err
}
diff --git a/conformance/reporter.go b/conformance/reporter.go
new file mode 100644
index 000000000..1cd2d389d
--- /dev/null
+++ b/conformance/reporter.go
@@ -0,0 +1,62 @@
+package conformance
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+ "testing"
+
+ "github.com/fatih/color"
+)
+
+// Reporter contains a subset of the testing.T methods, so that the
+// Execute* functions in this package can be used inside or outside of
+// go test runs.
+type Reporter interface {
+ Helper()
+
+ Log(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Logf(format string, args ...interface{})
+ FailNow()
+ Failed() bool
+}
+
+var _ Reporter = (*testing.T)(nil)
+
+// LogReporter wires the Reporter methods to the log package. It is appropriate
+// to use when calling the Execute* functions from a standalone CLI program.
+type LogReporter struct {
+ failed int32
+}
+
+var _ Reporter = (*LogReporter)(nil)
+
+func (*LogReporter) Helper() {}
+
+func (*LogReporter) Log(args ...interface{}) {
+ log.Println(args...)
+}
+
+func (*LogReporter) Logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+func (*LogReporter) FailNow() {
+ os.Exit(1)
+}
+
+func (l *LogReporter) Failed() bool {
+ return atomic.LoadInt32(&l.failed) == 1
+}
+
+func (l *LogReporter) Errorf(format string, args ...interface{}) {
+ atomic.StoreInt32(&l.failed, 1)
+ log.Println(color.HiRedString("❌ "+format, args...))
+}
+
+func (l *LogReporter) Fatalf(format string, args ...interface{}) {
+ atomic.StoreInt32(&l.failed, 1)
+ log.Fatal(color.HiRedString("❌ "+format, args...))
+}
diff --git a/conformance/runner.go b/conformance/runner.go
new file mode 100644
index 000000000..2db53b3e4
--- /dev/null
+++ b/conformance/runner.go
@@ -0,0 +1,272 @@
+package conformance
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "strconv"
+
+ "github.com/fatih/color"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ "github.com/filecoin-project/test-vectors/schema"
+ "github.com/ipfs/go-blockservice"
+ "github.com/ipfs/go-cid"
+ ds "github.com/ipfs/go-datastore"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ format "github.com/ipfs/go-ipld-format"
+ "github.com/ipfs/go-merkledag"
+ "github.com/ipld/go-car"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/lib/blockstore"
+)
+
+// ExecuteMessageVector executes a message-class test vector.
+func ExecuteMessageVector(r Reporter, vector *schema.TestVector) {
+ var (
+ ctx = context.Background()
+ epoch = vector.Pre.Epoch
+ root = vector.Pre.StateTree.RootCID
+ )
+
+ // Load the CAR into a new temporary Blockstore.
+ bs, err := LoadVectorCAR(vector.CAR)
+ if err != nil {
+ r.Fatalf("failed to load the vector CAR: %w", err)
+ }
+
+ // Create a new Driver.
+ driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true})
+
+ var circSupply *abi.TokenAmount
+ if cs := vector.Pre.CircSupply; cs != nil {
+ ta := big.NewFromGo(cs)
+ circSupply = &ta
+ }
+
+ var basefee *abi.TokenAmount
+ if bf := vector.Pre.BaseFee; bf != nil {
+ ta := big.NewFromGo(bf)
+ basefee = &ta
+ }
+
+ // Apply every message.
+ for i, m := range vector.ApplyMessages {
+ msg, err := types.DecodeMessage(m.Bytes)
+ if err != nil {
+ r.Fatalf("failed to deserialize message: %s", err)
+ }
+
+ // add an epoch if one's set.
+ if m.Epoch != nil {
+ epoch = *m.Epoch
+ }
+
+ // Execute the message.
+ var ret *vm.ApplyRet
+ ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{
+ Preroot: root,
+ Epoch: abi.ChainEpoch(epoch),
+ Message: msg,
+ CircSupply: circSupply,
+ BaseFee: basefee,
+ })
+ if err != nil {
+ r.Fatalf("fatal failure when executing message: %s", err)
+ }
+
+ // Assert that the receipt matches what the test vector expects.
+ AssertMsgResult(r, vector.Post.Receipts[i], ret, strconv.Itoa(i))
+ }
+
+ // Once all messages are applied, assert that the final state root matches
+ // the expected postcondition root.
+ if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
+ r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ dumpThreeWayStateDiff(r, vector, bs, root)
+ r.FailNow()
+ }
+}
+
+// ExecuteTipsetVector executes a tipset-class test vector.
+func ExecuteTipsetVector(r Reporter, vector *schema.TestVector) {
+ var (
+ ctx = context.Background()
+ prevEpoch = vector.Pre.Epoch
+ root = vector.Pre.StateTree.RootCID
+ tmpds = ds.NewMapDatastore()
+ )
+
+ // Load the vector CAR into a new temporary Blockstore.
+ bs, err := LoadVectorCAR(vector.CAR)
+ if err != nil {
+ r.Fatalf("failed to load the vector CAR: %w", err)
+ }
+
+ // Create a new Driver.
+ driver := NewDriver(ctx, vector.Selector, DriverOpts{})
+
+ // Apply every tipset.
+ var receiptsIdx int
+ for i, ts := range vector.ApplyTipsets {
+ ts := ts // capture
+ ret, err := driver.ExecuteTipset(bs, tmpds, root, abi.ChainEpoch(prevEpoch), &ts)
+ if err != nil {
+ r.Fatalf("failed to apply tipset %d message: %s", i, err)
+ }
+
+ for j, v := range ret.AppliedResults {
+ AssertMsgResult(r, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i))
+ receiptsIdx++
+ }
+
+ // Compare the receipts root.
+ if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
+ r.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
+ }
+
+ prevEpoch = ts.Epoch
+ root = ret.PostStateRoot
+ }
+
+ // Once all messages are applied, assert that the final state root matches
+ // the expected postcondition root.
+ if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
+ r.Errorf("wrong post root cid; expected %v, but got %v", expected, actual)
+ dumpThreeWayStateDiff(r, vector, bs, root)
+ r.FailNow()
+ }
+}
+
+// AssertMsgResult compares a message result. It takes the expected receipt
+// encoded in the vector, the actual receipt returned by Lotus, and a message
+// label to log in the assertion failure message to facilitate debugging.
+func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, label string) {
+ r.Helper()
+
+ if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.ExitCode; expected != actual {
+ r.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual)
+ }
+ if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual {
+ r.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual)
+ }
+ if expected, actual := []byte(expected.ReturnValue), actual.Return; !bytes.Equal(expected, actual) {
+ r.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual))
+ }
+}
+
+func dumpThreeWayStateDiff(r Reporter, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
+ // check if statediff exists; if not, skip.
+ if err := exec.Command("statediff", "--help").Run(); err != nil {
+ r.Log("could not dump 3-way state tree diff upon test failure: statediff command not found")
+ r.Log("install statediff with:")
+ r.Log("$ git clone https://github.com/filecoin-project/statediff.git")
+ r.Log("$ cd statediff")
+ r.Log("$ go generate ./...")
+ r.Log("$ go install ./cmd/statediff")
+ return
+ }
+
+ tmpCar, err := writeStateToTempCAR(bs,
+ vector.Pre.StateTree.RootCID,
+ vector.Post.StateTree.RootCID,
+ actual,
+ )
+ if err != nil {
+ r.Fatalf("failed to write temporary state CAR: %s", err)
+ }
+ defer os.RemoveAll(tmpCar) //nolint:errcheck
+
+ color.NoColor = false // enable colouring.
+
+ var (
+ a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state")
+ b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state")
+ c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state")
+ d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]")
+ d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]")
+ d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
+ )
+
+ printDiff := func(left, right cid.Cid) {
+ cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String())
+ b, err := cmd.CombinedOutput()
+ if err != nil {
+ r.Fatalf("statediff failed: %s", err)
+ }
+ r.Log(string(b))
+ }
+
+ bold := color.New(color.Bold).SprintfFunc()
+
+ // run state diffs.
+ r.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
+ printDiff(vector.Post.StateTree.RootCID, actual)
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
+ printDiff(vector.Pre.StateTree.RootCID, actual)
+
+ r.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
+ printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
+}
+
+// writeStateToTempCAR writes the provided roots to a temporary CAR file that
+// the caller must remove when done. It returns the full path of the temp file.
+func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, error) {
+ tmp, err := ioutil.TempFile("", "lotus-tests-*.car")
+ if err != nil {
+ return "", fmt.Errorf("failed to create temp file to dump CAR for diffing: %w", err)
+ }
+
+ carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+ }
+
+ var (
+ offl = offline.Exchange(bs)
+ blkserv = blockservice.New(bs, offl)
+ dserv = merkledag.NewDAGService(blkserv)
+ )
+
+ err = car.WriteCarWithWalker(context.Background(), dserv, roots, tmp, carWalkFn)
+ if err != nil {
+ return "", fmt.Errorf("failed to dump CAR for diffing: %w", err)
+ }
+ _ = tmp.Close()
+ return tmp.Name(), nil
+}
+
+func LoadVectorCAR(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, error) {
+ bs := blockstore.NewTemporary()
+
+ // Read the base64-encoded CAR from the vector, and inflate the gzip.
+ buf := bytes.NewReader(vectorCAR)
+ r, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to inflate gzipped CAR: %s", err)
+ }
+ defer r.Close() // nolint
+
+ // Load the CAR embedded in the test vector into the Blockstore.
+ _, err = car.LoadCar(bs, r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err)
+ }
+ return bs, nil
+}
diff --git a/conformance/runner_test.go b/conformance/runner_test.go
deleted file mode 100644
index cc7ef6b3d..000000000
--- a/conformance/runner_test.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package conformance
-
-import (
- "bytes"
- "compress/gzip"
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "testing"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/exitcode"
- "github.com/ipfs/go-blockservice"
- "github.com/ipfs/go-cid"
- ds "github.com/ipfs/go-datastore"
- offline "github.com/ipfs/go-ipfs-exchange-offline"
- format "github.com/ipfs/go-ipld-format"
- "github.com/ipfs/go-merkledag"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
- "github.com/filecoin-project/lotus/lib/blockstore"
-
- "github.com/filecoin-project/test-vectors/schema"
-
- "github.com/fatih/color"
- "github.com/ipld/go-car"
-)
-
-const (
- // EnvSkipConformance, if 1, skips the conformance test suite.
- EnvSkipConformance = "SKIP_CONFORMANCE"
-
- // EnvCorpusRootDir is the name of the environment variable where the path
- // to an alternative corpus location can be provided.
- //
- // The default is defaultCorpusRoot.
- EnvCorpusRootDir = "CORPUS_DIR"
-
- // defaultCorpusRoot is the directory where the test vector corpus is hosted.
- // It is mounted on the Lotus repo as a git submodule.
- //
- // When running this test, the corpus root can be overridden through the
- // -conformance.corpus CLI flag to run an alternate corpus.
- defaultCorpusRoot = "../extern/test-vectors/corpus"
-)
-
-// ignore is a set of paths relative to root to skip.
-var ignore = map[string]struct{}{
- ".git": {},
- "schema.json": {},
-}
-
-// TestConformance is the entrypoint test that runs all test vectors found
-// in the corpus root directory.
-//
-// It locates all json files via a recursive walk, skipping over the ignore set,
-// as well as files beginning with _. It parses each file as a test vector, and
-// runs it via the Driver.
-func TestConformance(t *testing.T) {
- if skip := strings.TrimSpace(os.Getenv(EnvSkipConformance)); skip == "1" {
- t.SkipNow()
- }
- // corpusRoot is the effective corpus root path, taken from the `-conformance.corpus` CLI flag,
- // falling back to defaultCorpusRoot if not provided.
- corpusRoot := defaultCorpusRoot
- if dir := strings.TrimSpace(os.Getenv(EnvCorpusRootDir)); dir != "" {
- corpusRoot = dir
- }
-
- var vectors []string
- err := filepath.Walk(corpusRoot+"/", func(path string, info os.FileInfo, err error) error {
- if err != nil {
- t.Fatal(err)
- }
-
- filename := filepath.Base(path)
- rel, err := filepath.Rel(corpusRoot, path)
- if err != nil {
- t.Fatal(err)
- }
-
- if _, ok := ignore[rel]; ok {
- // skip over using the right error.
- if info.IsDir() {
- return filepath.SkipDir
- }
- return nil
- }
- if info.IsDir() {
- // dive into directories.
- return nil
- }
- if filepath.Ext(path) != ".json" {
- // skip if not .json.
- return nil
- }
- if ignored := strings.HasPrefix(filename, "_"); ignored {
- // ignore files starting with _.
- t.Logf("ignoring: %s", rel)
- return nil
- }
- vectors = append(vectors, rel)
- return nil
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- if len(vectors) == 0 {
- t.Fatalf("no test vectors found")
- }
-
- // Run a test for each vector.
- for _, v := range vectors {
- path := filepath.Join(corpusRoot, v)
- raw, err := ioutil.ReadFile(path)
- if err != nil {
- t.Fatalf("failed to read test raw file: %s", path)
- }
-
- var vector schema.TestVector
- err = json.Unmarshal(raw, &vector)
- if err != nil {
- t.Errorf("failed to parse test vector %s: %s; skipping", path, err)
- continue
- }
-
- t.Run(v, func(t *testing.T) {
- for _, h := range vector.Hints {
- if h == schema.HintIncorrect {
- t.Logf("skipping vector marked as incorrect: %s", vector.Meta.ID)
- t.SkipNow()
- }
- }
-
- // dispatch the execution depending on the vector class.
- switch vector.Class {
- case "message":
- executeMessageVector(t, &vector)
- case "tipset":
- executeTipsetVector(t, &vector)
- default:
- t.Fatalf("test vector class not supported: %s", vector.Class)
- }
- })
- }
-}
-
-// executeMessageVector executes a message-class test vector.
-func executeMessageVector(t *testing.T, vector *schema.TestVector) {
- var (
- ctx = context.Background()
- epoch = vector.Pre.Epoch
- root = vector.Pre.StateTree.RootCID
- )
-
- // Load the CAR into a new temporary Blockstore.
- bs := loadCAR(t, vector.CAR)
-
- // Create a new Driver.
- driver := NewDriver(ctx, vector.Selector)
-
- // Apply every message.
- for i, m := range vector.ApplyMessages {
- msg, err := types.DecodeMessage(m.Bytes)
- if err != nil {
- t.Fatalf("failed to deserialize message: %s", err)
- }
-
- // add an epoch if one's set.
- if m.Epoch != nil {
- epoch = *m.Epoch
- }
-
- // Execute the message.
- var ret *vm.ApplyRet
- ret, root, err = driver.ExecuteMessage(bs, root, abi.ChainEpoch(epoch), msg)
- if err != nil {
- t.Fatalf("fatal failure when executing message: %s", err)
- }
-
- // Assert that the receipt matches what the test vector expects.
- assertMsgResult(t, vector.Post.Receipts[i], ret, strconv.Itoa(i))
- }
-
- // Once all messages are applied, assert that the final state root matches
- // the expected postcondition root.
- if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
- t.Logf("actual state root CID doesn't match expected one; expected: %s, actual: %s", expected, actual)
- dumpThreeWayStateDiff(t, vector, bs, root)
- t.FailNow()
- }
-}
-
-// executeTipsetVector executes a tipset-class test vector.
-func executeTipsetVector(t *testing.T, vector *schema.TestVector) {
- var (
- ctx = context.Background()
- prevEpoch = vector.Pre.Epoch
- root = vector.Pre.StateTree.RootCID
- tmpds = ds.NewMapDatastore()
- )
-
- // Load the CAR into a new temporary Blockstore.
- bs := loadCAR(t, vector.CAR)
-
- // Create a new Driver.
- driver := NewDriver(ctx, vector.Selector)
-
- // Apply every tipset.
- var receiptsIdx int
- for i, ts := range vector.ApplyTipsets {
- ts := ts // capture
- ret, err := driver.ExecuteTipset(bs, tmpds, root, abi.ChainEpoch(prevEpoch), &ts)
- if err != nil {
- t.Fatalf("failed to apply tipset %d message: %s", i, err)
- }
-
- for j, v := range ret.AppliedResults {
- assertMsgResult(t, vector.Post.Receipts[receiptsIdx], v, fmt.Sprintf("%d of tipset %d", j, i))
- receiptsIdx++
- }
-
- // Compare the receipts root.
- if expected, actual := vector.Post.ReceiptsRoots[i], ret.ReceiptsRoot; expected != actual {
- t.Errorf("post receipts root doesn't match; expected: %s, was: %s", expected, actual)
- }
-
- prevEpoch = ts.Epoch
- root = ret.PostStateRoot
- }
-
- // Once all messages are applied, assert that the final state root matches
- // the expected postcondition root.
- if expected, actual := vector.Post.StateTree.RootCID, root; expected != actual {
- t.Logf("actual state root CID doesn't match expected one; expected: %s, actual: %s", expected, actual)
- dumpThreeWayStateDiff(t, vector, bs, root)
- t.FailNow()
- }
-}
-
-// assertMsgResult compares a message result. It takes the expected receipt
-// encoded in the vector, the actual receipt returned by Lotus, and a message
-// label to log in the assertion failure message to facilitate debugging.
-func assertMsgResult(t *testing.T, expected *schema.Receipt, actual *vm.ApplyRet, label string) {
- t.Helper()
-
- if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.ExitCode; expected != actual {
- t.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual)
- }
- if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual {
- t.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual)
- }
- if expected, actual := []byte(expected.ReturnValue), actual.Return; !bytes.Equal(expected, actual) {
- t.Errorf("return value of msg %s did not match; expected: %s, got: %s", label, base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(actual))
- }
-}
-
-func dumpThreeWayStateDiff(t *testing.T, vector *schema.TestVector, bs blockstore.Blockstore, actual cid.Cid) {
- // check if statediff exists; if not, skip.
- if err := exec.Command("statediff", "--help").Run(); err != nil {
- t.Log("could not dump 3-way state tree diff upon test failure: statediff command not found")
- t.Log("install statediff with:")
- t.Log("$ git clone https://github.com/filecoin-project/statediff.git")
- t.Log("$ cd statediff")
- t.Log("$ go generate ./...")
- t.Log("$ go install ./cmd/statediff")
- return
- }
-
- tmpCar := writeStateToTempCAR(t, bs,
- vector.Pre.StateTree.RootCID,
- vector.Post.StateTree.RootCID,
- actual,
- )
-
- color.NoColor = false // enable colouring.
-
- t.Errorf("wrong post root cid; expected %v, but got %v", vector.Post.StateTree.RootCID, actual)
-
- var (
- a = color.New(color.FgMagenta, color.Bold).Sprint("(A) expected final state")
- b = color.New(color.FgYellow, color.Bold).Sprint("(B) actual final state")
- c = color.New(color.FgCyan, color.Bold).Sprint("(C) initial state")
- d1 = color.New(color.FgGreen, color.Bold).Sprint("[Δ1]")
- d2 = color.New(color.FgGreen, color.Bold).Sprint("[Δ2]")
- d3 = color.New(color.FgGreen, color.Bold).Sprint("[Δ3]")
- )
-
- printDiff := func(left, right cid.Cid) {
- cmd := exec.Command("statediff", "car", "--file", tmpCar, left.String(), right.String())
- b, err := cmd.CombinedOutput()
- if err != nil {
- t.Fatalf("statediff failed: %s", err)
- }
- t.Log(string(b))
- }
-
- bold := color.New(color.Bold).SprintfFunc()
-
- // run state diffs.
- t.Log(bold("=== dumping 3-way diffs between %s, %s, %s ===", a, b, c))
-
- t.Log(bold("--- %s left: %s; right: %s ---", d1, a, b))
- printDiff(vector.Post.StateTree.RootCID, actual)
-
- t.Log(bold("--- %s left: %s; right: %s ---", d2, c, b))
- printDiff(vector.Pre.StateTree.RootCID, actual)
-
- t.Log(bold("--- %s left: %s; right: %s ---", d3, c, a))
- printDiff(vector.Pre.StateTree.RootCID, vector.Post.StateTree.RootCID)
-}
-
-// writeStateToTempCAR writes the provided roots to a temporary CAR that'll be
-// cleaned up via t.Cleanup(). It returns the full path of the temp file.
-func writeStateToTempCAR(t *testing.T, bs blockstore.Blockstore, roots ...cid.Cid) string {
- tmp, err := ioutil.TempFile("", "lotus-tests-*.car")
- if err != nil {
- t.Fatalf("failed to create temp file to dump CAR for diffing: %s", err)
- }
- // register a cleanup function to delete the CAR.
- t.Cleanup(func() {
- _ = os.Remove(tmp.Name())
- })
-
- carWalkFn := func(nd format.Node) (out []*format.Link, err error) {
- for _, link := range nd.Links() {
- if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
- continue
- }
- out = append(out, link)
- }
- return out, nil
- }
-
- var (
- offl = offline.Exchange(bs)
- blkserv = blockservice.New(bs, offl)
- dserv = merkledag.NewDAGService(blkserv)
- )
-
- err = car.WriteCarWithWalker(context.Background(), dserv, roots, tmp, carWalkFn)
- if err != nil {
- t.Fatalf("failed to dump CAR for diffing: %s", err)
- }
- _ = tmp.Close()
- return tmp.Name()
-}
-
-func loadCAR(t *testing.T, vectorCAR schema.Base64EncodedBytes) blockstore.Blockstore {
- bs := blockstore.NewTemporary()
-
- // Read the base64-encoded CAR from the vector, and inflate the gzip.
- buf := bytes.NewReader(vectorCAR)
- r, err := gzip.NewReader(buf)
- if err != nil {
- t.Fatalf("failed to inflate gzipped CAR: %s", err)
- }
- defer r.Close() // nolint
-
- // Load the CAR embedded in the test vector into the Blockstore.
- _, err = car.LoadCar(bs, r)
- if err != nil {
- t.Fatalf("failed to load state tree car from test vector: %s", err)
- }
- return bs
-}
diff --git a/documentation/en/.glossary.json b/documentation/en/.glossary.json
index e8a9e0846..0967ef424 100644
--- a/documentation/en/.glossary.json
+++ b/documentation/en/.glossary.json
@@ -1,146 +1 @@
-{
- "bellman": {
- "title": "Bellman",
- "value": "Bellman is a rust crate for building zk-SNARK circuits. It provides circuit traits and primitive structures, as well as basic gadget implementations such as booleans and number abstractions."
- },
- "nvme": {
- "title": "NVMe",
- "value": "(non-volatile memory express) is a host controller interface and storage protocol created to accelerate the transfer of data between enterprise and client systems and solid-state drives (SSDs) over a computer's high-speed Peripheral Component Interconnect Express (PCIe) bus."
- },
- "multiaddr": {
- "title": "Multiaddr",
- "value": "Multiaddr is a format for encoding addresses from various well-established network protocols. It is useful to write applications that future-proof their use of addresses, and allow multiple transport protocols and addresses to coexist."
- },
- "attofil": {
- "title": "attoFIL",
- "value": "AttoFIL is a word used to describe 10^-18 FIL. The word atto comes from the Norwegian and Danish term: atten eighteen."
- },
- "fil": {
- "title": "FIL",
- "value": "A ticker symbol is an abbreviation used to uniquely identify Filecoin when it is used in a wallet exchange or a cryptocurrency exchange."
- },
- "epost": {
- "title": "Election Proof-of-Spacetime",
- "value": "Election Proof-of-Spacetime couples the Proof-of-Spacetime process with block production, meaning that in order to produce a block, the miner must produce a valid Proof-of-Spacetime proof (snark output)."
- },
- "jwt": {
- "title": "JWT",
- "value": "JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties."
- },
- "json-rpc": {
- "title": "JSON-RPC",
- "value": "JSON-RPC is a remote procedure call protocol encoded in JSON. It is a very simple protocol (and very similar to XML-RPC), defining only a few data types and commands."
- },
- "bls-address": {
- "title": "BLS Signature (Address)",
- "value": "A Boneh–Lynn–Shacham (BLS) signature is a digital signature scheme that allows a user to determine the authenticity of a signer, and is a commonly used signature scheme in the Filecoin Distributed Storage Network."
- },
- "faucet": {
- "title": "Filecoin Test Faucet",
- "value": "A webpage where you can get free test Filecoin to participate in the Testnet."
- },
- "chain": {
- "title": "Chain",
- "value": "The Filecoin Blockchain is a distributed virtual machine that achieves consensus, processes messages, accounts for storage, and maintains security in the Filecoin Protocol. It is the main interface linking various actors in the Filecoin system."
- },
- "miner-power": {
- "title": "Miner Power",
- "value": "Miner storage in relation to network storage, tracked in the power table."
- },
- "sector": {
- "title": "Sector",
- "value": "A fixed-size block of data of SECTOR_SIZE bytes which generally contains client's data."
- },
- "sealing": {
- "title": "Sealing",
- "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector."
- },
- "seal": {
- "title": "Seal",
- "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector."
- },
- "posts": {
- "title": "Proof-of-Spacetime(s)",
- "value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data."
- },
- "filecoin-testnet": {
- "title": "Filecoin Testnet",
- "value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value – the official filecoin tokens will not be released until Mainnet launch."
- },
- "filecoin-decentralized-storage-market": {
- "title": "Filecoin Decentralized Storage Market",
- "value": "Storage Market subsystem is the data entry point into the network. Miners only earn power from data stored in a storage deal and all deals live on the Filecoin network."
- },
- "filecoin-proof-parameters": {
- "title": "Filecoin Proof Parameters",
- "value": "The proving algorithms rely on a large binary parameter file."
- },
- "lotus-devnet": {
- "title": "DevNet",
- "value": "On the DevNets, you can store data as a storage client and also try how Filecoin mining works. The devnets are an important development tool for those who anticipate building applications on top of the Filecoin protocol or storing data on the decentralized storage market. "
- },
- "filecoin-distributed-storage-network": {
- "title": "Filecoin Distributed Storage Network",
- "value": "Filecoin is a distributed storage network based on a blockchain mechanism. Filecoin miners can elect to provide storage capacity for the network, and thereby earn units of the Filecoin cryptocurrency (FIL) by periodically producing cryptographic proofs that certify that they are providing the capacity specified."
- },
- "lotus-node": {
- "title": "Lotus Node",
- "value": "The Lotus Node is full of capabilities. It runs the Blockchain system, makes retrieval deals, does data transfer, supports block producer logic, and syncs and validates the chain."
- },
- "block-rewards": {
- "title": "Block Reward",
- "value": "Over the entire lifetime of the protocol, 1,400,000,000 FIL (TotalIssuance) will be given out to miners. The rate at which the funds are given out is set to halve every six years, smoothly (not a fixed jump like in Bitcoin)."
- },
- "block-producer-miner": {
- "title": "Miner (Block Producer)",
- "value": "The Block Producer Miner's logic. It currently shares an interface and process with the Lotus Node. A Block Producer chooses which messages to include in a block and is rewarded according to each message’s gas price and consumption, forming a market."
- },
- "lotus-miner": {
- "title": "Miner (lotus-miner)",
- "value": "The Miner's logic. It has its own dedicated process. Contributes to the network through Sector commitments and Proofs of Spacetime to prove that it is storing the sectors it has commited to."
- },
- "swarm-port": {
- "title": "Swarm Port (Libp2p)",
- "value": "The LibP2P Swarm manages groups of connections to peers, handles incoming and outgoing streams, and is part of the miners implementation. The port value is part of the Host interface."
- },
- "daemon": {
- "title": "Lotus Daemon",
- "value": "A Daemon is a program that runs as a background process. A Daemon in the context of the Filecoin Distributed Storage Network may enable applications to communicate with peers, handle protocols, participate in pubsub, and interact with a distributed hash table (DHT)."
- },
- "storage-deal": {
- "title": "Storage deal",
- "value": "One of the two types of deals in Filecoin markets. Storage deals are recorded on the blockchain and enforced by the protocol."
- },
- "retrieval-deal": {
- "title": "Retrieval deal",
- "value": "One of the two types of deals in Filecoin markets. Retrieval deals are off chain and enabled by micropayment channel by transacting parties."
- },
- "deal-cid": {
- "title": "Deal CID",
- "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DealCID specifically is used in storage deals."
- },
- "data-cid": {
- "title": "Data CID",
- "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DataCID specifically is used to represent the file that is stored in the Filecoin Distributed Storage Network."
- },
- "cid": {
- "title": "CID",
- "value": "A CID is a self-describing content-addressed identifier. It uses cryptographic hashes to achieve content addressing. It uses several multiformats to achieve flexible self-description, namely multihash for hashes, multicodec for data content types, and multibase to encode the CID itself into strings."
- },
- "total-network-power": {
- "title": "Total Network Power",
- "value": "A reference to all the Power Tables for every subchain, accounting for each Lotus Miner on chain."
- },
- "chain-block-height": {
- "title": "Chain Block Height",
- "value": "Chain block height is defined as the number of blocks in the chain between any given block and the very first block in the blockchain."
- },
- "block-height": {
- "title": "Block Height",
- "value": "Height of the Merkle Tree of a sector. A sector is a contiguous array of bytes that a miner puts together, seals, and performs Proofs of Spacetime on."
- },
- "blocktime": {
- "title": "Blocktime",
- "value": "The time it takes for a Block to propagate to the whole network."
- }
-}
+{}
diff --git a/documentation/en/.library.json b/documentation/en/.library.json
index 3fab0df9b..e31f09950 100644
--- a/documentation/en/.library.json
+++ b/documentation/en/.library.json
@@ -1,232 +1,25 @@
{
"posts": [
{
- "title": "Hardware Requirements",
- "slug": "en+hardware",
- "github": "en/hardware.md",
+ "title": "About Lotus",
+ "slug": "",
+ "github": "en/about.md",
"value": null,
- "posts": [
- {
- "title": "Testing Configuration",
- "slug": "en+hardware-mining",
- "github": "en/hardware-mining.md",
- "value": null
- }
- ]
+ "posts": []
},
{
- "title": "Setup",
- "slug": "en+getting-started",
- "github": "en/getting-started.md",
- "value": null,
- "posts": [
- {
- "title": "Arch Linux Installation",
- "slug": "en+install-lotus-arch",
- "github": "en/install-lotus-arch.md",
- "value": null
- },
- {
- "title": "Ubuntu Installation",
- "slug": "en+install-lotus-ubuntu",
- "github": "en/install-lotus-ubuntu.md",
- "value": null
- },
- {
- "title": "Fedora Installation",
- "slug": "en+install-lotus-fedora",
- "github": "en/install-lotus-fedora.md",
- "value": null
- },
- {
- "title": "MacOS Installation",
- "slug": "en+install-lotus-macos",
- "github": "en/install-lotus-macos.md",
- "value": null
- },
- {
- "title": "Updating Lotus",
- "slug": "en+updating-lotus",
- "github": "en/updating-lotus.md",
- "value": null
- },
- {
- "title": "Join Testnet",
- "slug": "en+join-testnet",
- "github": "en/join-testnet.md",
- "value": null
- },
- {
- "title": "Use Lotus with systemd",
- "slug": "en+install-systemd-services",
- "github": "en/install-systemd-services.md",
- "value": null
- },
- {
- "title": "Setup Troubleshooting",
- "slug": "en+setup-troubleshooting",
- "github": "en/setup-troubleshooting.md",
- "value": null
- },
- {
- "title": "Environment Variables",
- "slug": "en+env-vars",
- "github": "en/environment-vars.md",
- "value": null
- }
- ]
- },
- {
- "title": "Architecture",
+ "title": "Lotus Architecture (WIP)",
"slug": "en+arch",
- "github": "en/architecture.md",
+ "github": "en/architecture/architecture.md",
"value": null,
"posts": [
- {
- "title": "The Message Pool",
- "slug": "en+mpool",
- "github": "en/mpool.md",
- "value": null
- }
+ {
+ "title": "The Message Pool",
+ "slug": "en+mpool",
+ "github": "en/architecture/mpool.md",
+ "value": null
+ }
]
- },
- {
- "title": "Storage Mining",
- "slug": "en+mining",
- "github": "en/mining.md",
- "value": null,
- "posts": [
- {
- "title": "Lotus Worker",
- "slug": "en+lotus-worker",
- "github": "en/mining-lotus-worker.md",
- "value": null
- },
- {
- "title": "Static Ports",
- "slug": "en+setting-a-static-port",
- "github": "en/setting-a-static-port.md",
- "value": null
- },
- {
- "title": "Mining Troubleshooting",
- "slug": "en+mining-troubleshooting",
- "github": "en/mining-troubleshooting.md",
- "value": null
- }
- ]
- },
- {
- "title": "Storing Data",
- "slug": "en+storing-data",
- "github": "en/storing-data.md",
- "value": null,
- "posts": [
- {
- "title": "Storage Troubleshooting",
- "slug": "en+storing-data-troubleshooting",
- "github": "en/storing-data-troubleshooting.md",
- "value": null
- },
- {
- "title": "Information for Miners",
- "slug": "en+info-for-miners",
- "github": "en/miner-deals.md",
- "value": null
- },
- {
- "title": "IPFS Integration",
- "slug": "en+ipfs-client-integration",
- "github": "en/storing-ipfs-integration.md",
- "value": null
- }
- ]
- },
- {
- "title": "Retrieving Data",
- "slug": "en+retrieving-data",
- "github": "en/retrieving-data.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Payment Channels",
- "slug": "en+payment-channels",
- "github": "en/payment-channels.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Command Line Interface",
- "slug": "en+cli",
- "github": "en/cli.md",
- "value": null,
- "posts": []
- },
- {
- "title": "API",
- "slug": "en+api",
- "github": "en/api.md",
- "value": null,
- "posts": [
- {
- "title": "Remote API Support",
- "slug": "en+api-scripting-support",
- "github": "en/api-scripting-support.md",
- "value": null
- },
- {
- "title": "API Methods",
- "slug": "en+api-methods",
- "github": "en/api-methods.md",
- "value": null
- },
- {
- "title": "API Troubleshooting",
- "slug": "en+api-troubleshooting",
- "github": "en/api-troubleshooting.md",
- "value": null
- }
- ]
- },
- {
- "title": "Developer Tools",
- "slug": "en+dev-tools",
- "github": "en/dev-tools.md",
- "value": null,
- "posts": [
- {
- "title": "Setup Local Devnet",
- "slug": "en+setup-local-dev-net",
- "github": "en/local-dev-net.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Jaeger Tracing",
- "slug": "en+dev-tools-jaeger-tracing",
- "github": "en/dev-tools-jaeger-tracing.md",
- "value": null,
- "posts": []
- }
- ]
- },
- {
- "title": "FAQs",
- "slug": "en+faqs",
- "github": "en/faqs.md",
- "value": null,
- "posts": []
- },
- {
- "title": "Glossary",
- "slug": "en+glossary",
- "github": "en/.glossary.json",
- "value": null,
- "custom": {
- "glossary": true
- },
- "posts": []
}
]
}
diff --git a/documentation/en/README.md b/documentation/en/README.md
new file mode 100644
index 000000000..76f11ed90
--- /dev/null
+++ b/documentation/en/README.md
@@ -0,0 +1,16 @@
+# Lotus documentation
+
+This folder contains some Lotus documentation mostly intended for Lotus developers.
+
+User documentation (including documentation for miners) has been moved to specific Lotus sections in https://docs.filecoin.io:
+
+- https://docs.filecoin.io/get-started/lotus
+- https://docs.filecoin.io/store/lotus
+- https://docs.filecoin.io/mine/lotus
+- https://docs.filecoin.io/build/lotus
+
+## The Lotu.sh site
+
+The https://lotu.sh and https://docs.lotu.sh sites are generated from this folder based on the index provided by [.library.json](.library.json). This is done at the [lotus-docs repository](https://github.com/filecoin-project/lotus-docs), which contains Lotus as a git submodule.
+
+To update the site, the lotus-docs repository should be updated with the desired version for the lotus git submodule. Once pushed to master, it will be auto-deployed.
diff --git a/documentation/en/dev/WIP-arch-complementary-notes.md b/documentation/en/WIP-arch-complementary-notes.md
similarity index 100%
rename from documentation/en/dev/WIP-arch-complementary-notes.md
rename to documentation/en/WIP-arch-complementary-notes.md
diff --git a/documentation/en/about.md b/documentation/en/about.md
new file mode 100644
index 000000000..f2051e00b
--- /dev/null
+++ b/documentation/en/about.md
@@ -0,0 +1,19 @@
+# Lotus
+
+Lotus is an implementation of the **Filecoin Distributed Storage Network**.
+
+It is written in Go and provides a suite of command-line applications:
+
+- Lotus Node (`lotus`): a Filecoin Node: validates network transactions, manages a FIL wallet, can perform storage and retrieval deals.
+- Lotus Miner (`lotus-miner`): a Filecoin miner. See the respective Lotus Miner section in the Mine documentation.
+- Lotus Worker (`lotus-worker`): a worker that assists miners to perform mining-related tasks. See its respective guide for more information.
+
+The [Lotus user documentation](https://docs.filecoin.io/get-started/lotus) is part of the [Filecoin documentation site](https://docs.filecoin.io):
+
+* To install and get started with Lotus, visit the [Get Started section](https://docs.filecoin.io/get-started/lotus).
+* Information about how to perform deals on the Filecoin network using Lotus can be found in the [Store section](https://docs.filecoin.io/store/lotus).
+* Miners looking to provide storage to the Network can find the latest guides in the [Mine section](https://docs.filecoin.io/mine/lotus).
+* Developers and integrators that wish to use the Lotus APIs can start in the [Build section](https://docs.filecoin.io/build/lotus).
+
+For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://spec.filecoin.io/).
+
diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md
index ed082ccbf..af6ee8b97 100644
--- a/documentation/en/api-methods.md
+++ b/documentation/en/api-methods.md
@@ -169,6 +169,7 @@
* [SyncState](#SyncState)
* [SyncSubmitBlock](#SyncSubmitBlock)
* [SyncUnmarkBad](#SyncUnmarkBad)
+ * [SyncValidateTipset](#SyncValidateTipset)
* [Wallet](#Wallet)
* [WalletBalance](#WalletBalance)
* [WalletDefaultAddress](#WalletDefaultAddress)
@@ -181,6 +182,7 @@
* [WalletSetDefault](#WalletSetDefault)
* [WalletSign](#WalletSign)
* [WalletSignMessage](#WalletSignMessage)
+ * [WalletValidateAddress](#WalletValidateAddress)
* [WalletVerify](#WalletVerify)
##
@@ -4379,6 +4381,28 @@ Inputs:
Response: `{}`
+### SyncValidateTipset
+SyncValidateTipset indicates whether the provided tipset is valid or not
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `true`
+
## Wallet
@@ -4585,6 +4609,21 @@ Response:
}
```
+### WalletValidateAddress
+WalletValidateAddress validates whether a given string can be decoded as a well-formed address
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response: `"t01234"`
+
### WalletVerify
WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
The address does not have to be in the wallet.
diff --git a/documentation/en/api-scripting-support.md b/documentation/en/api-scripting-support.md
deleted file mode 100644
index 653f144ed..000000000
--- a/documentation/en/api-scripting-support.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Remote API Support
-
-You may want to delegate the work **Lotus Miner** or **Lotus Node** performs to other machines.
-Here is how to setup the necessary authorization and environment variables.
-
-## Environment variables
-
-Environmental variables are variables that are defined for the current shell and are inherited by any child shells or processes. Environmental variables are used to pass information into processes that are spawned from the shell.
-
-Using the [JWT you generated](https://lotu.sh/en+api#how-do-i-generate-a-token-18865), you can assign it and the **multiaddr** to the appropriate environment variable.
-
-```sh
-# Lotus Node
-FULLNODE_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/1234/http"
-
-# Lotus Miner
-MINER_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/2345/http"
-```
-
-You can also use `lotus auth api-info --perm admin` to quickly create _API_INFO env vars
-
-- The **Lotus Node**'s `mutliaddr` is in `~/.lotus/api`.
-- The default token is in `~/.lotus/token`.
-- The **Lotus Miner**'s `multiaddr` is in `~/.lotusminer/config`.
-- The default token is in `~/.lotusminer/token`.
diff --git a/documentation/en/api-troubleshooting.md b/documentation/en/api-troubleshooting.md
deleted file mode 100644
index 0cb3a6800..000000000
--- a/documentation/en/api-troubleshooting.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# API Troubleshooting
-
-## Types: params
-
-`params` must be an array. If there are no `params` you should still pass an empty array.
-
-## Types: TipSet
-
-For methods such as `Filecoin.StateMinerPower`, where the method accepts the argument of the type `TipSet`, you can pass `null` to use the current chain head.
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateMinerPower", "params": ["t0101", null], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-## Types: Sending a CID
-
-If you do not serialize the CID as a [JSON IPLD link](https://did-ipid.github.io/ipid-did-method/#txref), you will receive an error. Here is an example of a broken CURL request:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": ["bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"], "id": 0 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-To fix it, change the `params` property to:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": [{"/": "bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"}], "id": 0 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
diff --git a/documentation/en/api.md b/documentation/en/api.md
deleted file mode 100644
index 9760e2f32..000000000
--- a/documentation/en/api.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# API
-
-Here is an early overview of how to make API calls.
-
-Implementation details for the **JSON-RPC** package are [here](https://github.com/filecoin-project/go-jsonrpc).
-
-## Overview: How do you modify the config.toml to change the API endpoint?
-
-API requests are made against `127.0.0.1:1234` unless you modify `.lotus/config.toml`.
-
-Options:
-
-- `http://[api:port]/rpc/v0` - HTTP endpoint
-- `ws://[api:port]/rpc/v0` - Websocket endpoint
-- `PUT http://[api:port]/rest/v0/import` - File import, it requires write permissions.
-
-## What methods can I use?
-
-For now, you can look into different files to find methods available to you based on your needs:
-
-- [Both Lotus node + miner APIs](https://github.com/filecoin-project/lotus/blob/master/api/api_common.go)
-- [Lotus node API](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go)
-- [Lotus miner API](https://github.com/filecoin-project/lotus/blob/master/api/api_storage.go)
-
-The necessary permissions for each are in [api/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/struct.go).
-
-## How do I make an API request?
-
-To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go).
-
-```go
-ChainHead(context.Context) (*types.TipSet, error)
-```
-
-And create a CURL command. In this command, `ChainHead` is included as `{ "method": "Filecoin.ChainHead" }`:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-If the request requires authorization, add an authorization header:
-
-```sh
-curl -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer $(cat ~/.lotusminer/token)" \
- --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \
- 'http://127.0.0.1:1234/rpc/v0'
-```
-
-> In the future we will add a playground to make it easier to build and experiment with API requests.
-
-## CURL authorization
-
-To authorize your request, you will need to include the **JWT** in a HTTP header, for example:
-
-```sh
--H "Authorization: Bearer $(cat ~/.lotusminer/token)"
-```
-
-Admin token is stored in `~/.lotus/token` for the **Lotus Node** or `~/.lotusminer/token` for the **Lotus Miner**.
-
-## How do I generate a token?
-
-To generate a JWT with custom permissions, use this command:
-
-```sh
-# Lotus Node
-lotus auth create-token --perm admin
-
-# Lotus Miner
-lotus-miner auth create-token --perm admin
-```
-
-## What authorization level should I use?
-
-When viewing [api/apistruct/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/apistruct/struct.go), you will encounter these types:
-
-- `read` - Read node state, no private data.
-- `write` - Write to local store / chain, and `read` permissions.
-- `sign` - Use private keys stored in wallet for signing, `read` and `write` permissions.
-- `admin` - Manage permissions, `read`, `write`, and `sign` permissions.
diff --git a/documentation/en/architecture.md b/documentation/en/architecture/architecture.md
similarity index 99%
rename from documentation/en/architecture.md
rename to documentation/en/architecture/architecture.md
index ca4789fa0..61cd117bb 100644
--- a/documentation/en/architecture.md
+++ b/documentation/en/architecture/architecture.md
@@ -6,7 +6,7 @@ Filecoin protocol, validating the blocks and state transitions.
The specification for the Filecoin protocol can be found [here](https://filecoin-project.github.io/specs/).
For information on how to setup and operate a Lotus node,
-please follow the instructions [here](https://lotu.sh/en+getting-started).
+please follow the instructions [here](en+getting-started).
# Components
diff --git a/documentation/en/mpool.md b/documentation/en/architecture/mpool.md
similarity index 100%
rename from documentation/en/mpool.md
rename to documentation/en/architecture/mpool.md
diff --git a/documentation/en/cli.md b/documentation/en/cli.md
deleted file mode 100644
index fd26400d0..000000000
--- a/documentation/en/cli.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# Lotus Command Line Interface
-
-The Command Line Interface (CLI) is a convenient way to interact with
-a Lotus node. You can use the CLI to operate your node,
-get information about the blockchain,
-manage your accounts and transfer funds,
-create storage deals, and much more!
-
-The CLI is intended to be self-documenting, so when in doubt, simply add `--help`
-to whatever command you're trying to run! This will also display all of the
-input parameters that can be provided to a command.
-
-We highlight some of the commonly
-used features of the CLI below.
-All CLI commands should be run from the home directory of the Lotus project.
-
-## Operating a Lotus node
-
-### Starting up a node
-
-```sh
-lotus daemon
-```
-This command will start up your Lotus node, with its API port open at 1234.
-You can pass `--api=` to use a different port.
-
-### Checking your sync progress
-
-```sh
-lotus sync status
-```
-This command will print your current tipset height under `Height`, and the target tipset height
-under `Taregt`.
-
-You can also run `lotus sync wait` to get constant updates on your sync progress.
-
-### Getting the head tipset
-
-```sh
-lotus chain head
-```
-
-### Control the logging level
-
-```sh
-lotus log set-level
-```
-This command can be used to toggle the logging levels of the different
-systems of a Lotus node. In decreasing order
-of logging detail, the levels are `debug`, `info`, `warn`, and `error`.
-
-As an example,
-to set the `chain` and `blocksync` to log at the `debug` level, run
-`lotus log set-level --system chain --system blocksync debug`.
-
-To see the various logging system, run `lotus log list`.
-
-### Find out what version of Lotus you're running
-
-```sh
-lotus version
-```
-
-## Managing your accounts
-
-### Listing accounts in your wallet
-
-```sh
-lotus wallet list
-```
-
-### Creating a new account
-
-```sh
-lotus wallet new bls
-```
-This command will create a new BLS account in your wallet; these
-addresses start with the prefix `t3`. Running `lotus wallet new secp256k1`
-(or just `lotus wallet new`) will create
-a new Secp256k1 account, which begins with the prefix `t1`.
-
-### Getting an account's balance
-
-```sh
-lotus wallet balance
-```
-
-### Transferring funds
-
-```sh
-lotus send --source=