// lotus/cli/client.go
package cli
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"text/tabwriter"
"time"

tm "github.com/buger/goterm"
"github.com/chzyer/readline"
"github.com/docker/go-units"
"github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"

"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/tablewriter"
"github.com/filecoin-project/lotus/node/repo/imports"
)
var CidBaseFlag = cli.StringFlag{
Name: "cid-base",
Hidden: true,
Value: "base32",
Usage: "Multibase encoding used for version 1 CIDs in output.",
DefaultText: "base32",
}
// GetCidEncoder returns an encoder using the `cid-base` flag if provided, or
// the default (Base32) encoder if not.
func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
val := cctx.String("cid-base")
e := cidenc.Encoder{Base: multibase.MustNewEncoder(multibase.Base32)}
if val != "" {
var err error
e.Base, err = multibase.EncoderByName(val)
if err != nil {
return e, err
}
}
return e, nil
}
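
// For example, a hypothetical invocation `lotus client import --cid-base=base58btc <file>`
// would print the root CID in base58btc rather than the default base32; any name accepted
// by multibase.EncoderByName works here.
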
var ClientCmd = &cli.Command{
Name: "client",
Usage: "Make deals, store data, retrieve data",
Subcommands: []*cli.Command{
WithCategory("storage", clientDealCmd),
WithCategory("storage", clientQueryAskCmd),
WithCategory("storage", clientListDeals),
WithCategory("storage", clientGetDealCmd),
WithCategory("storage", clientListAsksCmd),
WithCategory("storage", clientDealStatsCmd),
WithCategory("storage", clientInspectDealCmd),
WithCategory("data", clientImportCmd),
WithCategory("data", clientDropCmd),
WithCategory("data", clientLocalCmd),
WithCategory("data", clientStat),
WithCategory("retrieval", clientFindCmd),
WithCategory("retrieval", clientQueryRetrievalAskCmd),
WithCategory("retrieval", clientRetrieveCmd),
WithCategory("retrieval", clientRetrieveCatCmd),
WithCategory("retrieval", clientRetrieveLsCmd),
WithCategory("retrieval", clientCancelRetrievalDealCmd),
WithCategory("retrieval", clientListRetrievalsCmd),
WithCategory("util", clientCommPCmd),
WithCategory("util", clientCarGenCmd),
WithCategory("util", clientBalancesCmd),
WithCategory("util", clientListTransfers),
WithCategory("util", clientRestartTransfer),
WithCategory("util", clientCancelTransfer),
},
}
var clientImportCmd = &cli.Command{
Name: "import",
Usage: "Import data",
ArgsUsage: "[inputPath]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "car",
Usage: "import from a car file instead of a regular file",
},
&cli.BoolFlag{
Name: "quiet",
Aliases: []string{"q"},
Usage: "Output root CID only",
},
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
absPath, err := filepath.Abs(cctx.Args().First())
if err != nil {
return err
}
ref := lapi.FileRef{
Path: absPath,
IsCAR: cctx.Bool("car"),
}
c, err := api.ClientImport(ctx, ref)
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
if !cctx.Bool("quiet") {
fmt.Printf("Import %d, Root ", c.ImportID)
}
fmt.Println(encoder.Encode(c.Root))
return nil
},
}
var clientDropCmd = &cli.Command{
Name: "drop",
Usage: "Remove import",
ArgsUsage: "[import ID...]",
Action: func(cctx *cli.Context) error {
if cctx.NArg() < 1 {
return IncorrectNumArgs(cctx)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var ids []uint64
for i, s := range cctx.Args().Slice() {
id, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return xerrors.Errorf("parsing %d-th import ID: %w", i, err)
}
ids = append(ids, id)
}
for _, id := range ids {
if err := api.ClientRemoveImport(ctx, imports.ID(id)); err != nil {
return xerrors.Errorf("removing import %d: %w", id, err)
}
}
return nil
},
}
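
// For instance, `lotus client drop 1 2 3` removes the locally tracked imports with IDs 1, 2 and 3.
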
var clientCommPCmd = &cli.Command{
Name: "commP",
Usage: "Calculate the piece-cid (commP) of a CAR file",
ArgsUsage: "[inputFile]",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
ret, err := api.ClientCalcCommP(ctx, cctx.Args().Get(0))
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
fmt.Println("CID: ", encoder.Encode(ret.Root))
fmt.Println("Piece size: ", types.SizeStr(types.NewInt(uint64(ret.Size))))
fmt.Println("Piece size in bytes: ", types.NewInt(uint64(ret.Size)))
return nil
},
}
var clientCarGenCmd = &cli.Command{
Name: "generate-car",
Usage: "Generate a car file from input",
ArgsUsage: "[inputPath outputPath]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.NArg() != 2 {
return IncorrectNumArgs(cctx)
}
ref := lapi.FileRef{
Path: cctx.Args().First(),
IsCAR: false,
}
op := cctx.Args().Get(1)
if err = api.ClientGenCar(ctx, ref, op); err != nil {
return err
}
return nil
},
}
var clientLocalCmd = &cli.Command{
Name: "local",
Usage: "List locally imported data",
Flags: []cli.Flag{
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
list, err := api.ClientListImports(ctx)
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
sort.Slice(list, func(i, j int) bool {
return list[i].Key < list[j].Key
})
for _, v := range list {
cidStr := "<nil>"
if v.Root != nil {
cidStr = encoder.Encode(*v.Root)
}
fmt.Printf("%d: %s @%s (%s)\n", v.Key, cidStr, v.FilePath, v.Source)
if v.Err != "" {
fmt.Printf("\terror: %s\n", v.Err)
}
}
return nil
},
}
var clientDealCmd = &cli.Command{
Name: "deal",
Usage: "Initialize storage deal with a miner",
Description: `Make a deal with a miner.
dataCid comes from running 'lotus client import'.
miner is the address of the miner you wish to make a deal with.
price is measured in FIL/Epoch. Miners usually don't accept a bid
lower than their advertised ask (which is in FIL/GiB/Epoch). You can check a miner's listed price
with 'lotus client query-ask <miner address>'.
duration is how long the miner should store the data for, in blocks.
The minimum value is 518400 (6 months).`,
ArgsUsage: "[dataCid miner price duration]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "manual-piece-cid",
Usage: "manually specify piece commitment for data (dataCid must be to a car file)",
},
&cli.Int64Flag{
Name: "manual-piece-size",
Usage: "if manually specifying piece cid, used to specify size (dataCid must be to a car file)",
},
&cli.BoolFlag{
Name: "manual-stateless-deal",
Usage: "instructs the node to send an offline deal without registering it with the deallist/fsm",
},
&cli.StringFlag{
Name: "from",
Usage: "specify address to fund the deal with",
},
&cli.Int64Flag{
Name: "start-epoch",
Usage: "specify the epoch that the deal should start at",
Value: -1,
},
&cli.BoolFlag{
Name: "fast-retrieval",
Usage: "indicates that data should be available for fast retrieval",
Value: true,
},
&cli.BoolFlag{
Name: "verified-deal",
Usage: "indicate that the deal counts towards verified client total",
DefaultText: "true if client is verified, false otherwise",
},
&cli.StringFlag{
Name: "provider-collateral",
Usage: "specify the requested provider collateral the miner should put up",
},
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {
expectedArgsMsg := "expected 4 args: dataCid, miner, price, duration"

if !cctx.Args().Present() {
if cctx.Bool("manual-stateless-deal") {
return xerrors.New("--manual-stateless-deal can not be combined with interactive deal mode: you must specify the " + expectedArgsMsg)
}
return interactiveDeal(cctx)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
afmt := NewAppFmt(cctx.App)

if cctx.NArg() != 4 {
return IncorrectNumArgs(cctx)
}
// [data, miner, price, dur]
data, err := cid.Parse(cctx.Args().Get(0))
if err != nil {
return err
}
miner, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
price, err := types.ParseFIL(cctx.Args().Get(2))
if err != nil {
return err
}
dur, err := strconv.ParseInt(cctx.Args().Get(3), 10, 32)
if err != nil {
return err
}
var provCol big.Int
if pcs := cctx.String("provider-collateral"); pcs != "" {
pc, err := big.FromString(pcs)
if err != nil {
return fmt.Errorf("failed to parse provider-collateral: %w", err)
}
provCol = pc
}
if abi.ChainEpoch(dur) < build.MinDealDuration {
return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration)
}
if abi.ChainEpoch(dur) > build.MaxDealDuration {
return xerrors.Errorf("maximum deal duration is %d blocks", build.MaxDealDuration)
}
var a address.Address
if from := cctx.String("from"); from != "" {
faddr, err := address.NewFromString(from)
if err != nil {
return xerrors.Errorf("failed to parse 'from' address: %w", err)
}
a = faddr
} else {
def, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
a = def
}
ref := &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: data,
}
if mpc := cctx.String("manual-piece-cid"); mpc != "" {
c, err := cid.Parse(mpc)
if err != nil {
return xerrors.Errorf("failed to parse provided manual piece cid: %w", err)
}
ref.PieceCid = &c
psize := cctx.Int64("manual-piece-size")
if psize == 0 {
return xerrors.Errorf("must specify piece size when manually setting cid")
}
ref.PieceSize = abi.UnpaddedPieceSize(psize)
ref.TransferType = storagemarket.TTManual
}
// Check if the address is a verified client
dcap, err := api.StateVerifiedClientStatus(ctx, a, types.EmptyTSK)
if err != nil {
return err
}
isVerified := dcap != nil
// If the user has explicitly set the --verified-deal flag
if cctx.IsSet("verified-deal") {
// If --verified-deal is true, but the address is not a verified
// client, return an error
verifiedDealParam := cctx.Bool("verified-deal")
if verifiedDealParam && !isVerified {
return xerrors.Errorf("address %s does not have verified client status", a)
}
// Override the default
isVerified = verifiedDealParam
}
sdParams := &lapi.StartDealParams{
Data: ref,
Wallet: a,
Miner: miner,
EpochPrice: types.BigInt(price),
MinBlocksDuration: uint64(dur),
DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")),
FastRetrieval: cctx.Bool("fast-retrieval"),
VerifiedDeal: isVerified,
ProviderCollateral: provCol,
}
var proposal *cid.Cid
if cctx.Bool("manual-stateless-deal") {
if ref.TransferType != storagemarket.TTManual || price.Int64() != 0 {
return xerrors.New("when manual-stateless-deal is enabled, you must also provide a 'price' of 0 and specify 'manual-piece-cid' and 'manual-piece-size'")
}
proposal, err = api.ClientStatelessDeal(ctx, sdParams)
} else {
proposal, err = api.ClientStartDeal(ctx, sdParams)
}
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
afmt.Println(encoder.Encode(*proposal))
return nil
},
}
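
// Worked example (illustrative numbers, not defaults): with an ask of 0.0000000002 FIL/GiB/Epoch,
// a 1 GiB piece needs a bid of at least 0.0000000002 FIL/Epoch, so over the minimum duration of
// 518400 epochs the deal costs about 0.0000000002 * 518400 ≈ 0.0001 FIL.
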
func interactiveDeal(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
afmt := NewAppFmt(cctx.App)
state := "import"
gib := types.NewInt(1 << 30)
var data cid.Cid
var days int
var maddrs []address.Address
var ask []storagemarket.StorageAsk
var epochPrices []big.Int
var dur time.Duration
var epochs abi.ChainEpoch
var verified bool
var ds lapi.DataCIDSize
// find
var candidateAsks []QueriedAsk
var budget types.FIL
var dealCount int64
var medianPing, maxAcceptablePing time.Duration
var a address.Address
if from := cctx.String("from"); from != "" {
faddr, err := address.NewFromString(from)
if err != nil {
return xerrors.Errorf("failed to parse 'from' address: %w", err)
}
a = faddr
} else {
def, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
a = def
}
if _, err := api.StateGetActor(ctx, a, types.EmptyTSK); err != nil {
return xerrors.Errorf("address not initialized on chain: %w", err)
}
fromBal, err := api.WalletBalance(ctx, a)
if err != nil {
return xerrors.Errorf("checking from address balance: %w", err)
}
printErr := func(err error) {
afmt.Printf("%s %s\n", color.RedString("Error:"), err.Error())
}
cs := readline.NewCancelableStdin(afmt.Stdin)
go func() {
<-ctx.Done()
_ = cs.Close()
}()
rl := bufio.NewReader(cs)
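
// The loop below is a simple state machine. The straight-through path is
// import -> duration -> verified -> miner -> query -> confirm -> accept -> execute;
// entering no miner addresses takes the detour find -> max-ping -> find-budget ->
// find-count and then jumps straight to confirm. Each case re-prompts on bad input.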
uiLoop:
for {
// TODO: better exit handling
if err := ctx.Err(); err != nil {
return err
}
switch state {
case "import":
afmt.Print("Data CID (from " + color.YellowString("lotus client import") + "): ")
_cidStr, _, err := rl.ReadLine()
cidStr := string(_cidStr)
if err != nil {
printErr(xerrors.Errorf("reading cid string: %w", err))
continue
}
data, err = cid.Parse(cidStr)
if err != nil {
printErr(xerrors.Errorf("parsing cid string: %w", err))
continue
}
color.Blue(".. calculating data size\n")
ds, err = api.ClientDealPieceCID(ctx, data)
if err != nil {
return err
}
state = "duration"
case "duration":
afmt.Print("Deal duration (days): ")
_daystr, _, err := rl.ReadLine()
daystr := string(_daystr)
if err != nil {
return err
}
_, err = fmt.Sscan(daystr, &days)
if err != nil {
printErr(xerrors.Errorf("parsing duration: %w", err))
continue
}
minDealDurationDays := uint64(build.MinDealDuration) / (builtin.SecondsInDay / build.BlockDelaySecs)
if days < int(minDealDurationDays) {
printErr(xerrors.Errorf("minimum duration is %d days, got %d", minDealDurationDays, days))
continue
}
maxDealDurationDays := uint64(build.MaxDealDuration) / (builtin.SecondsInDay / build.BlockDelaySecs)
if days > int(maxDealDurationDays) {
printErr(xerrors.Errorf("maximum duration is %d days, got %d", maxDealDurationDays, days))
continue
}
// Convert the requested duration from days to chain epochs using the block time.
dur = 24 * time.Hour * time.Duration(days)
epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second))
state = "verified"
case "verified":
ts, err := api.ChainHead(ctx)
if err != nil {
return err
}
dcap, err := api.StateVerifiedClientStatus(ctx, a, ts.Key())
if err != nil {
return err
}
if dcap == nil {
state = "miner"
continue
}
if dcap.Uint64() < uint64(ds.PieceSize) {
color.Yellow(".. not enough DataCap available for a verified deal\n")
state = "miner"
continue
}
afmt.Print("\nMake this a verified deal? (yes/no): ")
_yn, _, err := rl.ReadLine()
yn := string(_yn)
if err != nil {
return err
}
switch yn {
case "yes":
verified = true
case "no":
verified = false
default:
afmt.Println("Type in full 'yes' or 'no'")
continue
}
state = "miner"
case "miner":
maddrs = maddrs[:0]
ask = ask[:0]
afmt.Print("Miner Addresses (f0.. f0..), none to find: ")
_maddrsStr, _, err := rl.ReadLine()
maddrsStr := string(_maddrsStr)
if err != nil {
printErr(xerrors.Errorf("reading miner address: %w", err))
continue
}
for _, s := range strings.Fields(maddrsStr) {
maddr, err := address.NewFromString(strings.TrimSpace(s))
if err != nil {
printErr(xerrors.Errorf("parsing miner address: %w", err))
continue uiLoop
}
maddrs = append(maddrs, maddr)
}
state = "query"
if len(maddrs) == 0 {
state = "find"
}
case "find":
2020-11-30 13:18:17 +00:00
asks, err := GetAsks(ctx, api)
2020-07-31 16:22:04 +00:00
if err != nil {
return err
}
if len(asks) == 0 {
printErr(xerrors.Errorf("no asks found"))
continue uiLoop
}
medianPing = asks[len(asks)/2].Ping
var avgPing time.Duration
for _, ask := range asks {
avgPing += ask.Ping
}
avgPing /= time.Duration(len(asks))
for _, ask := range asks {
2020-11-30 13:18:17 +00:00
if ask.Ask.MinPieceSize > ds.PieceSize {
continue
}
2020-11-30 13:18:17 +00:00
if ask.Ask.MaxPieceSize < ds.PieceSize {
continue
}
candidateAsks = append(candidateAsks, ask)
}
2020-07-31 16:22:04 +00:00
afmt.Printf("Found %d candidate asks\n", len(candidateAsks))
afmt.Printf("Average network latency: %s; Median latency: %s\n", avgPing.Truncate(time.Millisecond), medianPing.Truncate(time.Millisecond))
state = "max-ping"
case "max-ping":
maxAcceptablePing = medianPing
afmt.Printf("Maximum network latency (default: %s) (ms): ", maxAcceptablePing.Truncate(time.Millisecond))
_latStr, _, err := rl.ReadLine()
latStr := string(_latStr)
if err != nil {
printErr(xerrors.Errorf("reading maximum latency: %w", err))
continue
}
if latStr != "" {
maxMs, err := strconv.ParseInt(latStr, 10, 64)
if err != nil {
printErr(xerrors.Errorf("parsing FIL: %w", err))
continue uiLoop
}
maxAcceptablePing = time.Millisecond * time.Duration(maxMs)
}
var goodAsks []QueriedAsk
for _, candidateAsk := range candidateAsks {
if candidateAsk.Ping < maxAcceptablePing {
goodAsks = append(goodAsks, candidateAsk)
}
}
if len(goodAsks) == 0 {
afmt.Printf("no asks left after filtering for network latency\n")
continue uiLoop
}
afmt.Printf("%d asks left after filtering for network latency\n", len(goodAsks))
candidateAsks = goodAsks
state = "find-budget"
case "find-budget":
afmt.Printf("Proposing from %s, Current Balance: %s\n", a, types.FIL(fromBal))
afmt.Print("Maximum budget (FIL): ") // TODO: Propose some default somehow?
2020-07-31 16:22:04 +00:00
_budgetStr, _, err := rl.ReadLine()
budgetStr := string(_budgetStr)
if err != nil {
printErr(xerrors.Errorf("reading miner address: %w", err))
continue
2020-10-02 20:07:40 +00:00
}
budget, err = types.ParseFIL(budgetStr)
if err != nil {
printErr(xerrors.Errorf("parsing FIL: %w", err))
continue uiLoop
}
2020-07-31 16:22:04 +00:00
var goodAsks []QueriedAsk
for _, ask := range candidateAsks {
p := ask.Ask.Price
if verified {
p = ask.Ask.VerifiedPrice
}
epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib)
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
if totalPrice.LessThan(abi.TokenAmount(budget)) {
goodAsks = append(goodAsks, ask)
}
}
candidateAsks = goodAsks
afmt.Printf("%d asks within budget\n", len(candidateAsks))
state = "find-count"
case "find-count":
afmt.Print("Deals to make (1): ")
dealcStr, _, err := rl.ReadLine()
if err != nil {
printErr(xerrors.Errorf("reading deal count: %w", err))
continue
}
dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64)
if err != nil {
printErr(xerrors.Errorf("reading deal count: invalid number"))
continue
}
color.Blue(".. Picking miners")
// TODO: some better strategy (this tries to pick randomly)
var pickedAsks []*storagemarket.StorageAsk
pickLoop:
for i := 0; i < 64; i++ {
rand.Shuffle(len(candidateAsks), func(i, j int) {
candidateAsks[i], candidateAsks[j] = candidateAsks[j], candidateAsks[i]
})
remainingBudget := abi.TokenAmount(budget)
pickedAsks = []*storagemarket.StorageAsk{}
for _, ask := range candidateAsks {
p := ask.Ask.Price
if verified {
p = ask.Ask.VerifiedPrice
}
epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib)
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
if totalPrice.GreaterThan(remainingBudget) {
continue
}
pickedAsks = append(pickedAsks, ask.Ask)
remainingBudget = big.Sub(remainingBudget, totalPrice)
if len(pickedAsks) == int(dealCount) {
break pickLoop
}
}
}
for _, pickedAsk := range pickedAsks {
maddrs = append(maddrs, pickedAsk.Miner)
ask = append(ask, *pickedAsk)
}
state = "confirm"
case "query":
color.Blue(".. querying miner asks")
for _, maddr := range maddrs {
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
printErr(xerrors.Errorf("failed to get peerID for miner: %w", err))
state = "miner"
continue uiLoop
}
a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr)
if err != nil {
printErr(xerrors.Errorf("failed to query ask for miner %s: %w", maddr.String(), err))
state = "miner"
continue uiLoop
}
ask = append(ask, *a.Response)
}
// TODO: run more validation
state = "confirm"
case "confirm":
// TODO: do some more epoch math (round to miner PP, deal start buffer)
afmt.Printf("-----\n")
afmt.Printf("Proposing from %s\n", a)
afmt.Printf("\tBalance: %s\n", types.FIL(fromBal))
afmt.Printf("\n")
afmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize)))
afmt.Printf("Duration: %s\n", dur)
pricePerGib := big.Zero()
for _, a := range ask {
p := a.Price
if verified {
p = a.VerifiedPrice
}
pricePerGib = big.Add(pricePerGib, p)
epochPrice := types.BigDiv(types.BigMul(p, types.NewInt(uint64(ds.PieceSize))), gib)
epochPrices = append(epochPrices, epochPrice)
mpow, err := api.StateMinerPower(ctx, a.Miner, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting power (%s): %w", a.Miner, err)
}
if len(ask) > 1 {
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
afmt.Printf("Miner %s (Power:%s) price: ~%s (%s per epoch)\n", color.YellowString(a.Miner.String()), color.GreenString(types.SizeStr(mpow.MinerPower.QualityAdjPower)), color.BlueString(types.FIL(totalPrice).String()), types.FIL(epochPrice))
}
}
// TODO: price is based on PaddedPieceSize, right?
epochPrice := types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib)
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
afmt.Printf("Total price: ~%s (%s per epoch)\n", color.CyanString(types.FIL(totalPrice).String()), types.FIL(epochPrice))
afmt.Printf("Verified: %v\n", verified)
state = "accept"
case "accept":
afmt.Print("\nAccept (yes/no): ")
_yn, _, err := rl.ReadLine()
yn := string(_yn)
if err != nil {
return err
}
if yn == "no" {
return nil
}
if yn != "yes" {
afmt.Println("Type in full 'yes' or 'no'")
continue
}
state = "execute"
case "execute":
color.Blue(".. executing\n")
for i, maddr := range maddrs {
proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: data,
PieceCid: &ds.PieceCID,
PieceSize: ds.PieceSize.Unpadded(),
},
Wallet: a,
Miner: maddr,
EpochPrice: epochPrices[i],
MinBlocksDuration: uint64(epochs),
DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")),
FastRetrieval: cctx.Bool("fast-retrieval"),
VerifiedDeal: verified,
})
if err != nil {
return err
}
encoder, err := GetCidEncoder(cctx)
if err != nil {
return err
}
afmt.Printf("Deal (%s) CID: %s\n", maddr, color.GreenString(encoder.Encode(*proposal)))
}
return nil
default:
return xerrors.Errorf("unknown state: %s", state)
}
}
}
var clientFindCmd = &cli.Command{
Name: "find",
Usage: "Find data in the network",
ArgsUsage: "[dataCid]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "pieceCid",
Usage: "require data to be retrieved from a specific Piece CID",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
file, err := cid.Parse(cctx.Args().First())
if err != nil {
return err
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
// Check if we already have this data locally
has, err := api.ClientHasLocal(ctx, file)
if err != nil {
return err
}
if has {
fmt.Println("LOCAL")
}
var pieceCid *cid.Cid
if cctx.String("pieceCid") != "" {
parsed, err := cid.Parse(cctx.String("pieceCid"))
if err != nil {
return err
}
pieceCid = &parsed
}
offers, err := api.ClientFindData(ctx, file, pieceCid)
if err != nil {
return err
}
for _, offer := range offers {
if offer.Err != "" {
fmt.Printf("ERR %s@%s: %s\n", offer.Miner, offer.MinerPeer.ID, offer.Err)
continue
}
fmt.Printf("RETRIEVAL %s@%s-%s-%s\n", offer.Miner, offer.MinerPeer.ID, types.FIL(offer.MinPrice), types.SizeStr(types.NewInt(offer.Size)))
}
return nil
},
}
var clientQueryRetrievalAskCmd = &cli.Command{
Name: "retrieval-ask",
Usage: "Get a miner's retrieval ask",
ArgsUsage: "[minerAddress] [data CID]",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "size",
Usage: "data size in bytes",
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
if cctx.NArg() != 2 {
return IncorrectNumArgs(cctx)
}
maddr, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
dataCid, err := cid.Parse(cctx.Args().Get(1))
if err != nil {
return fmt.Errorf("parsing data cid: %w", err)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
ask, err := api.ClientMinerQueryOffer(ctx, maddr, dataCid, nil)
if err != nil {
return err
}
afmt.Printf("Ask: %s\n", maddr)
afmt.Printf("Unseal price: %s\n", types.FIL(ask.UnsealPrice))
afmt.Printf("Price per byte: %s\n", types.FIL(ask.PricePerByte))
afmt.Printf("Payment interval: %s\n", types.SizeStr(types.NewInt(ask.PaymentInterval)))
afmt.Printf("Payment interval increase: %s\n", types.SizeStr(types.NewInt(ask.PaymentIntervalIncrease)))
size := cctx.Uint64("size")
if size == 0 {
if ask.Size == 0 {
return nil
}
size = ask.Size
afmt.Printf("Size: %s\n", types.SizeStr(types.NewInt(ask.Size)))
}
transferPrice := types.BigMul(ask.PricePerByte, types.NewInt(size))
totalPrice := types.BigAdd(ask.UnsealPrice, transferPrice)
afmt.Printf("Total price for %d bytes: %s\n", size, types.FIL(totalPrice))
return nil
},
}
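
// Worked example (illustrative numbers): with an unseal price of 0.01 FIL and a price per byte
// of 0.0000000001 FIL, retrieving 1048576 bytes costs 0.01 + 0.0000000001*1048576 ≈ 0.0101 FIL,
// which is the "Total price" computed above.
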
var clientListRetrievalsCmd = &cli.Command{
Name: "list-retrievals",
Usage: "List retrieval market deals",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
Usage: "print verbose deal details",
},
&cli.BoolFlag{
Name: "show-failed",
Usage: "show failed/failing deals",
Value: true,
},
&cli.BoolFlag{
Name: "completed",
Usage: "show completed retrievals",
},
&cli.BoolFlag{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
verbose := cctx.Bool("verbose")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
completed := cctx.Bool("completed")
localDeals, err := api.ClientListRetrievals(ctx)
if err != nil {
return err
}
if watch {
updates, err := api.ClientGetRetrievalUpdates(ctx)
if err != nil {
return err
}
for {
tm.Clear()
tm.MoveCursor(1, 1)
err = outputRetrievalDeals(ctx, tm.Screen, localDeals, verbose, showFailed, completed)
if err != nil {
return err
}
tm.Flush()
select {
case <-ctx.Done():
return nil
case updated := <-updates:
var found bool
for i, existing := range localDeals {
if existing.ID == updated.ID {
localDeals[i] = updated
found = true
break
}
}
if !found {
localDeals = append(localDeals, updated)
}
}
}
}
return outputRetrievalDeals(ctx, cctx.App.Writer, localDeals, verbose, showFailed, completed)
},
}
func isTerminalError(status retrievalmarket.DealStatus) bool {
// This should be patched in go-fil-markets; handled here for now so the output isn't buggy.
return retrievalmarket.IsTerminalError(status) || status == retrievalmarket.DealStatusErrored || status == retrievalmarket.DealStatusCancelled
}
func outputRetrievalDeals(ctx context.Context, out io.Writer, localDeals []lapi.RetrievalInfo, verbose bool, showFailed bool, completed bool) error {
var deals []api.RetrievalInfo
for _, deal := range localDeals {
if !showFailed && isTerminalError(deal.Status) {
continue
}
if !completed && retrievalmarket.IsTerminalSuccess(deal.Status) {
continue
}
deals = append(deals, deal)
}
tableColumns := []tablewriter.Column{
tablewriter.Col("PayloadCID"),
tablewriter.Col("DealId"),
tablewriter.Col("Provider"),
tablewriter.Col("Status"),
tablewriter.Col("PricePerByte"),
tablewriter.Col("Received"),
tablewriter.Col("TotalPaid"),
}
if verbose {
tableColumns = append(tableColumns,
tablewriter.Col("PieceCID"),
tablewriter.Col("UnsealPrice"),
tablewriter.Col("BytesPaidFor"),
tablewriter.Col("TransferChannelID"),
tablewriter.Col("TransferStatus"),
)
}
tableColumns = append(tableColumns, tablewriter.NewLineCol("Message"))
w := tablewriter.New(tableColumns...)
for _, d := range deals {
w.Write(toRetrievalOutput(d, verbose))
}
return w.Flush(out)
}
func toRetrievalOutput(d api.RetrievalInfo, verbose bool) map[string]interface{} {
payloadCID := d.PayloadCID.String()
provider := d.Provider.String()
if !verbose {
payloadCID = ellipsis(payloadCID, 8)
provider = ellipsis(provider, 8)
}
retrievalOutput := map[string]interface{}{
"PayloadCID": payloadCID,
"DealId": d.ID,
"Provider": provider,
"Status": retrievalStatusString(d.Status),
"PricePerByte": types.FIL(d.PricePerByte),
"Received": units.BytesSize(float64(d.BytesReceived)),
"TotalPaid": types.FIL(d.TotalPaid),
"Message": d.Message,
}
if verbose {
transferChannelID := ""
if d.TransferChannelID != nil {
transferChannelID = d.TransferChannelID.String()
}
transferStatus := ""
if d.DataTransfer != nil {
transferStatus = datatransfer.Statuses[d.DataTransfer.Status]
}
pieceCID := ""
if d.PieceCID != nil {
pieceCID = d.PieceCID.String()
}
retrievalOutput["PieceCID"] = pieceCID
retrievalOutput["UnsealPrice"] = types.FIL(d.UnsealPrice)
retrievalOutput["BytesPaidFor"] = units.BytesSize(float64(d.BytesPaidFor))
retrievalOutput["TransferChannelID"] = transferChannelID
retrievalOutput["TransferStatus"] = transferStatus
}
return retrievalOutput
}
func retrievalStatusString(status retrievalmarket.DealStatus) string {
s := retrievalmarket.DealStatuses[status]
switch {
case isTerminalError(status):
return color.RedString(s)
case retrievalmarket.IsTerminalSuccess(status):
return color.GreenString(s)
default:
return s
}
}
var clientInspectDealCmd = &cli.Command{
Name: "inspect-deal",
Usage: "Inspect detailed information about deal's lifecycle and the various stages it goes through",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "deal-id",
},
&cli.StringFlag{
Name: "proposal-cid",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
return inspectDealCmd(ctx, api, cctx.String("proposal-cid"), cctx.Int("deal-id"))
},
}
var clientDealStatsCmd = &cli.Command{
Name: "deal-stats",
Usage: "Print statistics about local storage deals",
Flags: []cli.Flag{
&cli.DurationFlag{
Name: "newer-than",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
localDeals, err := api.ClientListDeals(ctx)
if err != nil {
return err
}
var totalSize uint64
byState := map[storagemarket.StorageDealStatus][]uint64{}
for _, deal := range localDeals {
if cctx.IsSet("newer-than") {
if time.Since(deal.CreationTime) > cctx.Duration("newer-than") {
continue
}
}
totalSize += deal.Size
byState[deal.State] = append(byState[deal.State], deal.Size)
}
fmt.Printf("Total: %d deals, %s\n", len(localDeals), types.SizeStr(types.NewInt(totalSize)))
type stateStat struct {
state storagemarket.StorageDealStatus
count int
bytes uint64
}
stateStats := make([]stateStat, 0, len(byState))
for state, deals := range byState {
if state == storagemarket.StorageDealActive {
state = math.MaxUint64 // for sort
}
st := stateStat{
state: state,
count: len(deals),
}
for _, b := range deals {
st.bytes += b
}
stateStats = append(stateStats, st)
}
sort.Slice(stateStats, func(i, j int) bool {
return int64(stateStats[i].state) < int64(stateStats[j].state)
})
for _, st := range stateStats {
if st.state == math.MaxUint64 {
st.state = storagemarket.StorageDealActive
}
fmt.Printf("%s: %d deals, %s\n", storagemarket.DealStates[st.state], st.count, types.SizeStr(types.NewInt(st.bytes)))
}
return nil
},
}
var clientListAsksCmd = &cli.Command{
Name: "list-asks",
Usage: "List asks for top miners",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "by-ping",
Usage: "sort by ping",
},
&cli.StringFlag{
Name: "output-format",
Value: "text",
Usage: "Either 'text' or 'csv'",
},
&cli.BoolFlag{
Name: "protocols",
Usage: "Output supported deal protocols",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
asks, err := GetAsks(ctx, api)
if err != nil {
return err
}
if cctx.Bool("by-ping") {
sort.Slice(asks, func(i, j int) bool {
return asks[i].Ping < asks[j].Ping
})
}
pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s protos:%s\n"
if cctx.String("output-format") == "csv" {
fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping,Protocols\n")
pfmt = "%s,%s,%s,%s,%s,%s,%s\n"
}
for _, a := range asks {
ask := a.Ask
protos := ""
if cctx.Bool("protocols") {
protos = "[" + strings.Join(a.DealProtocols, ",") + "]"
}
fmt.Printf(pfmt, ask.Miner,
types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))),
types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))),
types.FIL(ask.Price),
types.FIL(ask.VerifiedPrice),
a.Ping,
protos,
)
}
return nil
},
}
type QueriedAsk struct {
Ask *storagemarket.StorageAsk
DealProtocols []string
Ping time.Duration
}
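
// GetAsks collects storage asks from the network in stages: list all miners, keep those
// with at least minimum power (up to 50 power queries in flight), fetch each remaining
// miner's ask with a 4-second timeout (querying twice and using the second round-trip as
// a latency estimate), and finally sort the results by ascending price.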
func GetAsks(ctx context.Context, api lapi.FullNode) ([]QueriedAsk, error) {
isTTY := true
if fileInfo, _ := os.Stdout.Stat(); fileInfo == nil || (fileInfo.Mode()&os.ModeCharDevice) == 0 {
isTTY = false
}
if isTTY {
color.Blue(".. getting miner list")
}
miners, err := api.StateListMiners(ctx, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("getting miner list: %w", err)
}
var lk sync.Mutex
var found int64
var withMinPower []address.Address
done := make(chan struct{})
go func() {
defer close(done)
var wg sync.WaitGroup
wg.Add(len(miners))
throttle := make(chan struct{}, 50)
for _, miner := range miners {
throttle <- struct{}{}
go func(miner address.Address) {
defer wg.Done()
defer func() {
<-throttle
}()
power, err := api.StateMinerPower(ctx, miner, types.EmptyTSK)
if err != nil {
return
}
if power.HasMinPower { // TODO: Lower threshold
atomic.AddInt64(&found, 1)
lk.Lock()
withMinPower = append(withMinPower, miner)
lk.Unlock()
}
}(miner)
}
wg.Wait()
}()
loop:
for {
select {
case <-time.After(150 * time.Millisecond):
if isTTY {
fmt.Printf("\r* Found %d miners with power", atomic.LoadInt64(&found))
}
case <-done:
break loop
}
}
if isTTY {
fmt.Printf("\r* Found %d miners with power\n", atomic.LoadInt64(&found))
color.Blue(".. querying asks")
}
var asks []QueriedAsk
var queried, got int64
done = make(chan struct{})
go func() {
defer close(done)
var wg sync.WaitGroup
wg.Add(len(withMinPower))
throttle := make(chan struct{}, 50)
for _, miner := range withMinPower {
throttle <- struct{}{}
go func(miner address.Address) {
defer wg.Done()
defer func() {
<-throttle
atomic.AddInt64(&queried, 1)
}()
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel()
mi, err := api.StateMinerInfo(ctx, miner, types.EmptyTSK)
if err != nil {
return
}
if mi.PeerId == nil {
return
}
ask, err := api.ClientQueryAsk(ctx, *mi.PeerId, miner)
if err != nil {
return
}
// Query the ask a second time to estimate round-trip latency to the miner.
rt := time.Now()
_, err = api.ClientQueryAsk(ctx, *mi.PeerId, miner)
if err != nil {
return
}
pingDuration := time.Since(rt)
atomic.AddInt64(&got, 1)
lk.Lock()
asks = append(asks, QueriedAsk{
Ask: ask.Response,
DealProtocols: ask.DealProtocols,
Ping: pingDuration,
})
lk.Unlock()
}(miner)
}
wg.Wait()
}()
loop2:
for {
select {
case <-time.After(150 * time.Millisecond):
if isTTY {
fmt.Printf("\r* Queried %d asks, got %d responses", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
}
case <-done:
break loop2
}
}
if isTTY {
fmt.Printf("\r* Queried %d asks, got %d responses\n", atomic.LoadInt64(&queried), atomic.LoadInt64(&got))
}
// Cheapest asks first.
sort.Slice(asks, func(i, j int) bool {
return asks[i].Ask.Price.LessThan(asks[j].Ask.Price)
})
return asks, nil
}
var clientQueryAskCmd = &cli.Command{
2020-03-06 19:01:28 +00:00
Name: "query-ask",
2020-07-24 01:52:29 +00:00
Usage: "Find a miners ask",
ArgsUsage: "[minerAddress]",
2019-09-13 21:00:36 +00:00
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "specify peer ID of node to make query against",
},
&cli.Int64Flag{
Name: "size",
Usage: "data size in bytes",
},
&cli.Int64Flag{
Name: "duration",
			Usage: "deal duration in epochs",
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
maddr, err := address.NewFromString(cctx.Args().First())
if err != nil {
return err
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var pid peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
pid = p
} else {
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get peerID for miner: %w", err)
}
			if mi.PeerId == nil || *mi.PeerId == "SETME" {
return fmt.Errorf("the miner hasn't initialized yet")
}
pid = *mi.PeerId
}
ask, err := api.ClientQueryAsk(ctx, pid, maddr)
if err != nil {
return err
}
afmt.Printf("Ask: %s\n", maddr)
afmt.Printf("Price per GiB: %s\n", types.FIL(ask.Price))
afmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.VerifiedPrice))
afmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))))
afmt.Printf("Min Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))))
size := cctx.Int64("size")
if size == 0 {
return nil
}
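
		// The ask price is quoted per GiB per epoch, so scale it by the
		// requested size in bytes and divide by 1 GiB (1<<30) to get the
		// per-epoch cost for this deal.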
perEpoch := types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(size))), types.NewInt(1<<30))
afmt.Printf("Price per Block: %s\n", types.FIL(perEpoch))
duration := cctx.Int64("duration")
if duration == 0 {
return nil
}
afmt.Printf("Total Price: %s\n", types.FIL(types.BigMul(perEpoch, types.NewInt(uint64(duration)))))
return nil
},
}
var clientListDeals = &cli.Command{
Name: "list-deals",
Usage: "List storage market deals",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
Usage: "print verbose deal details",
},
&cli.BoolFlag{
Name: "show-failed",
Usage: "show failed/failing deals",
},
&cli.BoolFlag{
Name: "watch",
			Usage: "watch deal updates in real-time, rather than a one-time list",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
verbose := cctx.Bool("verbose")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
localDeals, err := api.ClientListDeals(ctx)
if err != nil {
return err
}
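
		// In watch mode, keep redrawing the full deal table on every update
		// event until the context is cancelled.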
if watch {
updates, err := api.ClientGetDealUpdates(ctx)
if err != nil {
return err
}
for {
tm.Clear()
tm.MoveCursor(1, 1)
err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, showFailed)
if err != nil {
return err
}
tm.Flush()
select {
case <-ctx.Done():
return nil
case updated := <-updates:
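				// Merge the update: replace the tracked deal in place if we
				// already know it, otherwise start tracking it.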
var found bool
for i, existing := range localDeals {
if existing.ProposalCid.Equals(updated.ProposalCid) {
localDeals[i] = updated
found = true
break
}
}
if !found {
localDeals = append(localDeals, updated)
}
}
}
}
return outputStorageDeals(ctx, cctx.App.Writer, api, localDeals, verbose, showFailed)
},
}
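
// dealFromDealInfo combines a local deal record with its on-chain state.
// Deals that haven't been published yet (DealID == 0), or whose on-chain
// lookup fails, fall back to an empty on-chain state.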
func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipSet, v api.DealInfo) deal {
if v.DealID == 0 {
return deal{
LocalDeal: v,
OnChainDealState: market.EmptyDealState(),
}
}
onChain, err := full.StateMarketStorageDeal(ctx, v.DealID, head.Key())
if err != nil {
return deal{LocalDeal: v}
}
return deal{
LocalDeal: v,
OnChainDealState: onChain.State.Iface(),
2020-08-27 18:32:51 +00:00
}
}
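
// outputStorageDeals prints the given local deals, joined with their
// on-chain state, to out. Deals in the StorageDealError state are skipped
// unless showFailed is set.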
func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, localDeals []lapi.DealInfo, verbose bool, showFailed bool) error {
sort.Slice(localDeals, func(i, j int) bool {
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
})
head, err := full.ChainHead(ctx)
if err != nil {
return err
}
var deals []deal
for _, localDeal := range localDeals {
if showFailed || localDeal.State != storagemarket.StorageDealError {
deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal))
}
}
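
	// Verbose output is a raw tab-separated table that includes data
	// transfer details; the default output below uses tablewriter for a
	// more compact view.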
if verbose {
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n")
for _, d := range deals {
onChain := "N"
if d.OnChainDealState.SectorStartEpoch() != -1 {
onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch())
}
slashed := "N"
if d.OnChainDealState.SlashEpoch() != -1 {
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch())
}
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
transferChannelID := ""
if d.LocalDeal.TransferChannelID != nil {
transferChannelID = d.LocalDeal.TransferChannelID.String()
}
transferStatus := ""
if d.LocalDeal.DataTransfer != nil {
transferStatus = datatransfer.Statuses[d.LocalDeal.DataTransfer.Status]
// TODO: Include the transferred percentage once this bug is fixed:
// https://github.com/ipfs/go-graphsync/issues/126
//fmt.Printf("transferred: %d / size: %d\n", d.LocalDeal.DataTransfer.Transferred, d.LocalDeal.Size)
//if d.LocalDeal.Size > 0 {
// pct := (100 * d.LocalDeal.DataTransfer.Transferred) / d.LocalDeal.Size
// transferPct = fmt.Sprintf("%d%%", pct)
//}
}
fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\t%s\t%v\t%s\n",
d.LocalDeal.CreationTime.Format(time.Stamp),
d.LocalDeal.ProposalCid,
d.LocalDeal.DealID,
d.LocalDeal.Provider,
dealStateString(d.LocalDeal.State),
onChain,
slashed,
d.LocalDeal.PieceCID,
types.SizeStr(types.NewInt(d.LocalDeal.Size)),
price,
d.LocalDeal.Duration,
transferChannelID,
transferStatus,
d.LocalDeal.Verified,
d.LocalDeal.Message)
}
return w.Flush()
}
w := tablewriter.New(tablewriter.Col("DealCid"),
tablewriter.Col("DealId"),
tablewriter.Col("Provider"),
tablewriter.Col("State"),
tablewriter.Col("On Chain?"),
tablewriter.Col("Slashed?"),
tablewriter.Col("PieceCID"),
tablewriter.Col("Size"),
tablewriter.Col("Price"),
tablewriter.Col("Duration"),
tablewriter.Col("Verified"),
tablewriter.NewLineCol("Message"))
for _, d := range deals {
propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8)
onChain := "N"
2024-03-12 09:33:58 +00:00
if d.OnChainDealState.SectorStartEpoch() != -1 {
onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch())
}
slashed := "N"
if d.OnChainDealState.SlashEpoch() != -1 {
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch())
}
piece := ellipsis(d.LocalDeal.PieceCID.String(), 8)
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
w.Write(map[string]interface{}{
"DealCid": propcid,
"DealId": d.LocalDeal.DealID,
"Provider": d.LocalDeal.Provider,
"State": dealStateString(d.LocalDeal.State),
"On Chain?": onChain,
"Slashed?": slashed,
"PieceCID": piece,
"Size": types.SizeStr(types.NewInt(d.LocalDeal.Size)),
"Price": price,
"Verified": d.LocalDeal.Verified,
"Duration": d.LocalDeal.Duration,
"Message": d.LocalDeal.Message,
})
}
return w.Flush(out)
}
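// dealStateString renders a storage deal status as human-readable text,
// colorized by outcome: red for error/expired states, green for active.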
func dealStateString(state storagemarket.StorageDealStatus) string {
s := storagemarket.DealStates[state]
switch state {
case storagemarket.StorageDealError, storagemarket.StorageDealExpired:
return color.RedString(s)
case storagemarket.StorageDealActive:
return color.GreenString(s)
default:
return s
}
}
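// deal pairs the client's local view of a storage deal with the deal state
// currently recorded on chain.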
type deal struct {
LocalDeal lapi.DealInfo
OnChainDealState market.DealState
}
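// Example invocation (argument shown as a placeholder):
//
//	lotus client get-deal <proposalCID>
//
// Prints the local deal info as indented JSON; once the deal has a non-zero
// DealID, the on-chain StateMarketStorageDeal record is included as well.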
var clientGetDealCmd = &cli.Command{
Name: "get-deal",
Usage: "Print detailed deal information",
ArgsUsage: "[proposalCID]",
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
propcid, err := cid.Decode(cctx.Args().First())
if err != nil {
return err
}
di, err := api.ClientGetDealInfo(ctx, propcid)
if err != nil {
return err
}
out := map[string]interface{}{
"DealInfo: ": di,
}
if di.DealID != 0 {
onChain, err := api.StateMarketStorageDeal(ctx, di.DealID, types.EmptyTSK)
if err != nil {
return err
}
out["OnChain"] = onChain
}
b, err := json.MarshalIndent(out, "", " ")
if err != nil {
return err
}
fmt.Println(string(b))
return nil
},
}
var clientBalancesCmd = &cli.Command{
Name: "balances",
Usage: "Print storage market client balances",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "client",
Usage: "specify storage client address",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
var addr address.Address
if clientFlag := cctx.String("client"); clientFlag != "" {
ca, err := address.NewFromString(clientFlag)
if err != nil {
return err
}
addr = ca
} else {
def, err := api.WalletDefaultAddress(ctx)
if err != nil {
return err
}
addr = def
}
balance, err := api.StateMarketBalance(ctx, addr, types.EmptyTSK)
if err != nil {
return err
}
reserved, err := api.MarketGetReserved(ctx, addr)
if err != nil {
return err
}
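// Withdrawable balance is escrow minus locked minus reserved, floored at zero.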
avail := big.Sub(big.Sub(balance.Escrow, balance.Locked), reserved)
if avail.LessThan(big.Zero()) {
avail = big.Zero()
}
fmt.Printf("Client Market Balance for address %s:\n", addr)
fmt.Printf(" Escrowed Funds: %s\n", types.FIL(balance.Escrow))
fmt.Printf(" Locked Funds: %s\n", types.FIL(balance.Locked))
fmt.Printf(" Reserved Funds: %s\n", types.FIL(reserved))
fmt.Printf(" Available to Withdraw: %s\n", types.FIL(avail))
return nil
},
}
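// Example invocation (argument shown as a placeholder):
//
//	lotus client stat <dataCID>
//
// Reports the piece size a deal for this data would occupy alongside the raw
// payload size.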
var clientStat = &cli.Command{
Name: "stat",
Usage: "Print information about a locally stored file (piece size, etc)",
ArgsUsage: "<cid>",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
dataCid, err := cid.Parse(cctx.Args().First())
if err != nil {
return fmt.Errorf("parsing data cid: %w", err)
}
ds, err := api.ClientDealSize(ctx, dataCid)
if err != nil {
return err
}
fmt.Printf("Piece Size : %v\n", ds.PieceSize)
fmt.Printf("Payload Size: %v\n", ds.PayloadSize)
return nil
},
}
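// restart-transfer resolves the remote peer automatically when --peerid is not
// given, by scanning ClientListDataTransfers for a channel whose transfer ID
// and initiator side match the arguments.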
var clientRestartTransfer = &cli.Command{
Name: "restart-transfer",
Usage: "Force restart a stalled data transfer",
ArgsUsage: "[transferID]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "narrow to transfer with specific peer",
},
&cli.BoolFlag{
Name: "initiator",
Usage: "specify only transfers where peer is/is not initiator",
Value: true,
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("Error reading transfer ID: %w", err)
}
transferID := datatransfer.TransferID(transferUint)
initiator := cctx.Bool("initiator")
var other peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
other = p
} else {
channels, err := api.ClientListDataTransfers(ctx)
if err != nil {
return err
}
found := false
for _, channel := range channels {
if channel.IsInitiator == initiator && channel.TransferID == transferID {
other = channel.OtherPeer
found = true
break
}
}
if !found {
return errors.New("unable to find matching data transfer")
}
}
return api.ClientRestartDataTransfer(ctx, transferID, other, initiator)
},
}
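// cancel-transfer uses the same peer resolution as restart-transfer, then
// issues the cancel under a timeout so a slow storage provider cannot hang the
// CLI. Example (flag value illustrative):
//
//	lotus client cancel-transfer --cancel-timeout=30s <transferID>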
var clientCancelTransfer = &cli.Command{
Name: "cancel-transfer",
Usage: "Force cancel a data transfer",
ArgsUsage: "[transferID]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "peerid",
Usage: "narrow to transfer with specific peer",
},
&cli.BoolFlag{
Name: "initiator",
Usage: "specify only transfers where peer is/is not initiator",
Value: true,
},
&cli.DurationFlag{
Name: "cancel-timeout",
Usage: "time to wait for cancel to be sent to storage provider",
Value: 5 * time.Second,
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
transferUint, err := strconv.ParseUint(cctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("Error reading transfer ID: %w", err)
}
transferID := datatransfer.TransferID(transferUint)
initiator := cctx.Bool("initiator")
var other peer.ID
if pidstr := cctx.String("peerid"); pidstr != "" {
p, err := peer.Decode(pidstr)
if err != nil {
return err
}
other = p
} else {
channels, err := api.ClientListDataTransfers(ctx)
if err != nil {
return err
}
found := false
for _, channel := range channels {
if channel.IsInitiator == initiator && channel.TransferID == transferID {
other = channel.OtherPeer
found = true
break
}
}
if !found {
return errors.New("unable to find matching data transfer")
}
}
timeoutCtx, cancel := context.WithTimeout(ctx, cctx.Duration("cancel-timeout"))
defer cancel()
return api.ClientCancelDataTransfer(timeoutCtx, transferID, other, initiator)
},
}
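// cancel-retrieval identifies the deal by its numeric retrieval deal ID rather
// than a transfer ID. Example (argument shown as a placeholder):
//
//	lotus client cancel-retrieval --deal-id=<dealID>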
var clientCancelRetrievalDealCmd = &cli.Command{
Name: "cancel-retrieval",
Usage: "Cancel a retrieval deal by deal ID; this also cancels the associated transfer",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "deal-id",
Usage: "specify retrieval deal by deal ID",
Required: true,
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
id := cctx.Int64("deal-id")
if id < 0 {
return errors.New("deal id cannot be negative")
}
return api.ClientCancelRetrievalDeal(ctx, retrievalmarket.DealID(id))
},
}
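// list-transfers prints sending and receiving channels as two tables; with
// --watch, updates from ClientDataTransferUpdates are folded into the list and
// the screen is redrawn until the context is cancelled. Example:
//
//	lotus client list-transfers --watch --verbose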
var clientListTransfers = &cli.Command{
Name: "list-transfers",
Usage: "List ongoing data transfers for deals",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
Usage: "print verbose transfer details",
},
&cli.BoolFlag{
Name: "completed",
Usage: "show completed data transfers",
},
&cli.BoolFlag{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
},
&cli.BoolFlag{
Name: "show-failed",
Usage: "show failed/cancelled transfers",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
channels, err := api.ClientListDataTransfers(ctx)
if err != nil {
return err
}
verbose := cctx.Bool("verbose")
completed := cctx.Bool("completed")
watch := cctx.Bool("watch")
showFailed := cctx.Bool("show-failed")
if watch {
channelUpdates, err := api.ClientDataTransferUpdates(ctx)
if err != nil {
return err
}
for {
tm.Clear() // Clear current screen
tm.MoveCursor(1, 1)
OutputDataTransferChannels(tm.Screen, channels, verbose, completed, showFailed)
tm.Flush()
select {
case <-ctx.Done():
return nil
case channelUpdate := <-channelUpdates:
var found bool
for i, existing := range channels {
if existing.TransferID == channelUpdate.TransferID &&
existing.OtherPeer == channelUpdate.OtherPeer &&
existing.IsSender == channelUpdate.IsSender &&
existing.IsInitiator == channelUpdate.IsInitiator {
channels[i] = channelUpdate
found = true
break
}
}
if !found {
channels = append(channels, channelUpdate)
}
}
}
}
OutputDataTransferChannels(os.Stdout, channels, verbose, completed, showFailed)
return nil
},
}
// OutputDataTransferChannels generates table output for a list of channels
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, verbose, completed, showFailed bool) {
sort.Slice(channels, func(i, j int) bool {
return channels[i].TransferID < channels[j].TransferID
})
var receivingChannels, sendingChannels []lapi.DataTransferChannel
for _, channel := range channels {
if !completed && channel.Status == datatransfer.Completed {
continue
}
if !showFailed && (channel.Status == datatransfer.Failed || channel.Status == datatransfer.Cancelled) {
continue
}
if channel.IsSender {
sendingChannels = append(sendingChannels, channel)
} else {
receivingChannels = append(receivingChannels, channel)
}
}
fmt.Fprintf(out, "Sending Channels\n\n")
w := tablewriter.New(tablewriter.Col("ID"),
tablewriter.Col("Status"),
tablewriter.Col("Sending To"),
tablewriter.Col("Root Cid"),
tablewriter.Col("Initiated?"),
tablewriter.Col("Transferred"),
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range sendingChannels {
w.Write(toChannelOutput("Sending To", channel, verbose))
}
_ = w.Flush(out)
fmt.Fprintf(out, "\nReceiving Channels\n\n")
w = tablewriter.New(tablewriter.Col("ID"),
tablewriter.Col("Status"),
tablewriter.Col("Receiving From"),
tablewriter.Col("Root Cid"),
tablewriter.Col("Initiated?"),
tablewriter.Col("Transferred"),
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range receivingChannels {
w.Write(toChannelOutput("Receiving From", channel, verbose))
}
_ = w.Flush(out)
}
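// channelStatusString colorizes a transfer status: red for failed/cancelled,
// green for completed.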
func channelStatusString(status datatransfer.Status) string {
s := datatransfer.Statuses[status]
switch status {
case datatransfer.Failed, datatransfer.Cancelled:
return color.RedString(s)
case datatransfer.Completed:
return color.GreenString(s)
default:
return s
}
}
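// toChannelOutput builds one table row for a channel; unless verbose is set,
// long fields (root CID, peer, voucher) are shortened via ellipsis.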
func toChannelOutput(otherPartyColumn string, channel lapi.DataTransferChannel, verbose bool) map[string]interface{} {
rootCid := channel.BaseCID.String()
otherParty := channel.OtherPeer.String()
if !verbose {
rootCid = ellipsis(rootCid, 8)
otherParty = ellipsis(otherParty, 8)
}
initiated := "N"
if channel.IsInitiator {
initiated = "Y"
}
voucher := channel.Voucher
if len(voucher) > 40 && !verbose {
voucher = ellipsis(voucher, 37)
}
return map[string]interface{}{
"ID": channel.TransferID,
"Status": channelStatusString(channel.Status),
otherPartyColumn: otherParty,
"Root Cid": rootCid,
"Initiated?": initiated,
"Transferred": units.BytesSize(float64(channel.Transferred)),
"Voucher": voucher,
"Message": channel.Message,
}
}
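// ellipsis keeps only the trailing `length` characters of s, prefixed with
// "...": for example, ellipsis("abcdefghijkl", 4) == "...ijkl".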
func ellipsis(s string, length int) string {
if length > 0 && len(s) > length {
return "..." + s[len(s)-length:]
}
return s
}
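// inspectDealCmd locates a deal by proposal CID or (non-zero) deal ID, then
// refetches it via ClientGetDealInfo so that DealStages and
// DataTransfer.Stages are populated before rendering.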
func inspectDealCmd(ctx context.Context, api v0api.FullNode, proposalCid string, dealId int) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
deals, err := api.ClientListDeals(ctx)
if err != nil {
return err
}
var di *lapi.DealInfo
for i, cdi := range deals {
if proposalCid != "" && cdi.ProposalCid.String() == proposalCid {
di = &deals[i]
break
}
if dealId != 0 && int(cdi.DealID) == dealId {
di = &deals[i]
break
}
}
if di == nil {
if proposalCid != "" {
return fmt.Errorf("cannot find deal with proposal cid: %s", proposalCid)
}
if dealId != 0 {
return fmt.Errorf("cannot find deal with deal id: %v", dealId)
}
return errors.New("you must specify proposal cid or deal id in order to inspect a deal")
}
// populate DealInfo.DealStages and DataTransfer.Stages
// keep the proposal CID around: on error the refetched di may be nil
propCid := di.ProposalCid
di, err = api.ClientGetDealInfo(ctx, propCid)
if err != nil {
return fmt.Errorf("cannot get deal info for proposal cid %s: %w", propCid, err)
}
renderDeal(di)
return nil
}
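// renderDeal prints each deal stage with its logs; stages whose UpdatedTime is
// still zero are highlighted in yellow as not yet reached, and data transfer
// sub-stages are inlined under StorageDealStartDataTransfer.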
func renderDeal(di *lapi.DealInfo) {
color.Blue("Deal ID: %d\n", int(di.DealID))
color.Blue("Proposal CID: %s\n\n", di.ProposalCid.String())
if di.DealStages == nil {
color.Yellow("Deal was made with an older version of Lotus and Lotus did not collect detailed information about its stages")
return
}
for _, stg := range di.DealStages.Stages {
msg := fmt.Sprintf("%s %s: %s (expected duration: %s)", color.BlueString("Stage:"), color.BlueString(strings.TrimPrefix(stg.Name, "StorageDeal")), stg.Description, color.GreenString(stg.ExpectedDuration))
if stg.UpdatedTime.Time().IsZero() {
msg = color.YellowString(msg)
}
fmt.Println(msg)
for _, l := range stg.Logs {
fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log)
}
if stg.Name == "StorageDealStartDataTransfer" {
for _, dtStg := range di.DataTransfer.Stages.Stages {
fmt.Printf(" %s %s %s\n", color.YellowString(dtStg.CreatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), color.BlueString("Data transfer stage:"), color.BlueString(dtStg.Name))
for _, l := range dtStg.Logs {
fmt.Printf(" %s %s\n", color.YellowString(l.UpdatedTime.Time().UTC().Round(time.Second).Format(time.Stamp)), l.Log)
}
}
}
}
}