From cac848c106fb1608944958d4105c9186f957e42e Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Mon, 31 Aug 2020 14:24:23 -0700 Subject: [PATCH 01/88] add a command to import an ipld object into the chainstore --- cmd/lotus-shed/import-car.go | 57 ++++++++++++++++++++++++++++++++++++ cmd/lotus-shed/main.go | 1 + 2 files changed, 58 insertions(+) diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 01343c4a3..9cbff953b 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -1,10 +1,13 @@ package main import ( + "encoding/hex" "fmt" "io" "os" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" "github.com/ipld/go-car" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -81,3 +84,57 @@ var importCarCmd = &cli.Command{ } }, } + +var importObjectCmd = &cli.Command{ + Name: "import-obj", + Usage: "import a raw ipld object into your datastore", + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("opening fs repo: %w", err) + } + + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.Lock(repo.FullNode) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + ds, err := lr.Datastore("/chain") + if err != nil { + return err + } + + bs := blockstore.NewBlockstore(ds) + + c, err := cid.Decode(cctx.Args().Get(0)) + if err != nil { + return err + } + + data, err := hex.DecodeString(cctx.Args().Get(1)) + if err != nil { + return err + } + + blk, err := block.NewBlockWithCid(data, c) + if err != nil { + return err + } + + if err := bs.Put(blk); err != nil { + return err + } + + return nil + + }, +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 5438a31ef..11b98a3ac 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -24,6 +24,7 @@ func main() { bigIntParseCmd, staterootCmd, importCarCmd, + importObjectCmd, commpToCidCmd, fetchParamCmd, proofsCmd, From f58e8bc9a393c7fb3aca463fad961b45c8a0b8f4 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Tue, 1 Sep 2020 02:18:02 -0400 Subject: [PATCH 02/88] Fix some failed precommit handling --- extern/storage-sealing/states_failed.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index e313fd712..25f60ff2f 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -37,16 +37,16 @@ func (m *Sealing) checkPreCommitted(ctx statemachine.Context, sector SectorInfo) tok, _, err := m.api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleSealPrecommit1Failed(%d): temp error: %+v", sector.SectorNumber, err) - return nil, true + return nil, false } info, err := m.api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) if err != nil { log.Errorf("handleSealPrecommit1Failed(%d): temp error: %+v", sector.SectorNumber, err) - return nil, true + return nil, false } - return info, false + return info, true } func (m *Sealing) handleSealPrecommit1Failed(ctx statemachine.Context, sector SectorInfo) error { @@ -107,7 +107,7 @@ func (m *Sealing) handlePreCommitFailed(ctx statemachine.Context, sector SectorI } if pci, is := m.checkPreCommitted(ctx, sector); is && pci != nil { - if sector.PreCommitMessage != nil { + if sector.PreCommitMessage == nil { log.Warn("sector %d is precommitted on chain, but we don't 
have precommit message", sector.SectorNumber) return ctx.Send(SectorPreCommitLanded{TipSet: tok}) } From fe52c475703ad417178667477c003e6cd5d930bc Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 31 Aug 2020 17:11:01 +0200 Subject: [PATCH 03/88] Docs review and re-organization This: * Re-organizes the docs into sections that align with what docs.filecoin.io becoming: * An installation section * A "getting started" section (lotus client focused) * A "storing" section (lotus client focused) * A "mining" section (miner focused) * A "build" section (developer focused) * An legacy "architecture" section is left in the last place. A few high-value documentation pages have been reviewed and updated with the latest recommendations: * Installation section and lotus setup * Miner setup * etc. ... Other pages have been correctly merged into the new relevant sections. Some pages have not been touched. The filesystem layout of the documentation has been changed into folders corresponding to the sections (as requested by @cw). Some pages that were not linked at all and/or where hidden, have been moved to "unclassified". This should make the porting of the Lotus documentation to docs.filecoin.io much easier, while ensuring it is more up to date than it was before. For the moment, this breaks most links as link-aliasing is not supported in lotus-docs. --- Makefile | 2 +- documentation/en/.library.json | 353 +++++++++--------- documentation/en/about.md | 14 + documentation/en/api-scripting-support.md | 25 -- documentation/en/api.md | 85 ----- .../en/{ => architecture}/architecture.md | 2 +- documentation/en/{ => architecture}/mpool.md | 0 .../en/{ => building}/api-methods.md | 0 .../en/{ => building}/api-troubleshooting.md | 0 documentation/en/building/api.md | 38 ++ documentation/en/building/building.md | 5 + .../jaeger-tracing.md} | 0 .../local-devnet.md} | 0 .../en/{ => building}/payment-channels.md | 0 documentation/en/building/remote-api.md | 69 ++++ documentation/en/cli.md | 108 ------ documentation/en/dev-tools.md | 3 - documentation/en/environment-vars.md | 65 ---- documentation/en/faqs.md | 36 +- documentation/en/getting-started.md | 23 -- .../en/getting-started/getting-started.md | 3 + .../setup-troubleshooting.md | 13 +- documentation/en/getting-started/setup.md | 169 +++++++++ documentation/en/getting-started/wallet.md | 58 +++ documentation/en/hardware-mining.md | 54 --- documentation/en/hardware.md | 7 - documentation/en/install-lotus-arch.md | 51 --- documentation/en/install-lotus-fedora.md | 54 --- documentation/en/install-lotus-ubuntu.md | 54 --- documentation/en/install-systemd-services.md | 145 ------- .../en/installation/install-linux.md | 129 +++++++ .../install-macos.md} | 2 +- documentation/en/installation/installation.md | 39 ++ documentation/en/installation/update.md | 72 ++++ documentation/en/join-testnet.md | 93 ----- documentation/en/miner-deals.md | 39 -- documentation/en/mining.md | 149 -------- documentation/en/mining/gpus.md | 17 + .../lotus-seal-worker.md} | 58 +-- documentation/en/mining/managing-deals.md | 19 + documentation/en/mining/miner-setup.md | 241 ++++++++++++ .../en/{ => mining}/mining-troubleshooting.md | 9 +- documentation/en/mining/mining.md | 8 + documentation/en/retrieving-data.md | 27 -- documentation/en/setting-a-static-port.md | 54 --- .../adding-from-ipfs.md} | 6 +- documentation/en/store/making-deals.md | 71 ++++ documentation/en/store/retrieve.md | 27 ++ .../storage-troubleshooting.md} | 13 +- documentation/en/store/store.md | 11 + 
documentation/en/storing-data.md | 62 --- .../WIP-arch-complementary-notes.md | 0 .../en/{ => unclassified}/block-validation.md | 0 .../en/{dev => unclassified}/create-miner.md | 0 .../{ => unclassified}/dev-tools-pond-ui.md | 0 .../en/{ => unclassified}/sealing-procs.md | 0 documentation/en/updating-lotus.md | 14 - 57 files changed, 1239 insertions(+), 1357 deletions(-) create mode 100644 documentation/en/about.md delete mode 100644 documentation/en/api-scripting-support.md delete mode 100644 documentation/en/api.md rename documentation/en/{ => architecture}/architecture.md (99%) rename documentation/en/{ => architecture}/mpool.md (100%) rename documentation/en/{ => building}/api-methods.md (100%) rename documentation/en/{ => building}/api-troubleshooting.md (100%) create mode 100644 documentation/en/building/api.md create mode 100644 documentation/en/building/building.md rename documentation/en/{dev-tools-jaeger-tracing.md => building/jaeger-tracing.md} (100%) rename documentation/en/{local-dev-net.md => building/local-devnet.md} (100%) rename documentation/en/{ => building}/payment-channels.md (100%) create mode 100644 documentation/en/building/remote-api.md delete mode 100644 documentation/en/cli.md delete mode 100644 documentation/en/dev-tools.md delete mode 100644 documentation/en/environment-vars.md delete mode 100644 documentation/en/getting-started.md create mode 100644 documentation/en/getting-started/getting-started.md rename documentation/en/{ => getting-started}/setup-troubleshooting.md (60%) create mode 100644 documentation/en/getting-started/setup.md create mode 100644 documentation/en/getting-started/wallet.md delete mode 100644 documentation/en/hardware-mining.md delete mode 100644 documentation/en/hardware.md delete mode 100644 documentation/en/install-lotus-arch.md delete mode 100644 documentation/en/install-lotus-fedora.md delete mode 100644 documentation/en/install-lotus-ubuntu.md delete mode 100644 documentation/en/install-systemd-services.md create mode 100644 documentation/en/installation/install-linux.md rename documentation/en/{install-lotus-macos.md => installation/install-macos.md} (85%) create mode 100644 documentation/en/installation/installation.md create mode 100644 documentation/en/installation/update.md delete mode 100644 documentation/en/join-testnet.md delete mode 100644 documentation/en/miner-deals.md delete mode 100644 documentation/en/mining.md create mode 100644 documentation/en/mining/gpus.md rename documentation/en/{mining-lotus-worker.md => mining/lotus-seal-worker.md} (60%) create mode 100644 documentation/en/mining/managing-deals.md create mode 100644 documentation/en/mining/miner-setup.md rename documentation/en/{ => mining}/mining-troubleshooting.md (90%) create mode 100644 documentation/en/mining/mining.md delete mode 100644 documentation/en/retrieving-data.md delete mode 100644 documentation/en/setting-a-static-port.md rename documentation/en/{storing-ipfs-integration.md => store/adding-from-ipfs.md} (79%) create mode 100644 documentation/en/store/making-deals.md create mode 100644 documentation/en/store/retrieve.md rename documentation/en/{storing-data-troubleshooting.md => store/storage-troubleshooting.md} (51%) create mode 100644 documentation/en/store/store.md delete mode 100644 documentation/en/storing-data.md rename documentation/en/{dev => unclassified}/WIP-arch-complementary-notes.md (100%) rename documentation/en/{ => unclassified}/block-validation.md (100%) rename documentation/en/{dev => unclassified}/create-miner.md (100%) rename 
documentation/en/{ => unclassified}/dev-tools-pond-ui.md (100%) rename documentation/en/{ => unclassified}/sealing-procs.md (100%) delete mode 100644 documentation/en/updating-lotus.md diff --git a/Makefile b/Makefile index 4f6ece417..2e91cfa65 100644 --- a/Makefile +++ b/Makefile @@ -280,7 +280,7 @@ method-gen: gen: type-gen method-gen docsgen: - go run ./api/docgen > documentation/en/api-methods.md + go run ./api/docgen > documentation/en/building/api-methods.md print-%: @echo $*=$($*) diff --git a/documentation/en/.library.json b/documentation/en/.library.json index 3fab0df9b..87c7353c1 100644 --- a/documentation/en/.library.json +++ b/documentation/en/.library.json @@ -1,214 +1,207 @@ { "posts": [ { - "title": "Hardware Requirements", - "slug": "en+hardware", - "github": "en/hardware.md", + "title": "About Lotus", + "slug": "en+lotus", + "github": "en/about.md", + "value": null, + "posts": [] + }, + { + "title": "Installation", + "slug": "en+install", + "github": "en/installation/installation.md", "value": null, "posts": [ - { - "title": "Testing Configuration", - "slug": "en+hardware-mining", - "github": "en/hardware-mining.md", - "value": null - } + { + "title": "Linux installation", + "slug": "en+install-linux", + "github": "en/installation/install-linux.md", + "value": null + }, + { + "title": "MacOS installation", + "slug": "en+install-macos", + "github": "en/installation/install-macos.md", + "value": null + }, + { + "title": "Updating Lotus", + "slug": "en+update", + "github": "en/installation/update.md", + "value": null + } ] }, { - "title": "Setup", + "title": "Getting started", "slug": "en+getting-started", - "github": "en/getting-started.md", + "github": "en/getting-started/getting-started.md", "value": null, "posts": [ - { - "title": "Arch Linux Installation", - "slug": "en+install-lotus-arch", - "github": "en/install-lotus-arch.md", - "value": null - }, - { - "title": "Ubuntu Installation", - "slug": "en+install-lotus-ubuntu", - "github": "en/install-lotus-ubuntu.md", - "value": null - }, - { - "title": "Fedora Installation", - "slug": "en+install-lotus-fedora", - "github": "en/install-lotus-fedora.md", - "value": null - }, - { - "title": "MacOS Installation", - "slug": "en+install-lotus-macos", - "github": "en/install-lotus-macos.md", - "value": null - }, - { - "title": "Updating Lotus", - "slug": "en+updating-lotus", - "github": "en/updating-lotus.md", - "value": null - }, - { - "title": "Join Testnet", - "slug": "en+join-testnet", - "github": "en/join-testnet.md", - "value": null - }, - { - "title": "Use Lotus with systemd", - "slug": "en+install-systemd-services", - "github": "en/install-systemd-services.md", - "value": null - }, - { - "title": "Setup Troubleshooting", - "slug": "en+setup-troubleshooting", - "github": "en/setup-troubleshooting.md", - "value": null - }, - { - "title": "Environment Variables", - "slug": "en+env-vars", - "github": "en/environment-vars.md", - "value": null - } + { + "title": "Setting up Lotus", + "slug": "en+setup", + "github": "en/getting-started/setup.md", + "value": null + }, + { + + "title": "Obtaining and sending FIL", + "slug": "en+wallet", + "github": "en/getting-started/wallet.md", + "value": null + }, + { + "title": "Setup troubleshooting", + "slug": "en+setup-troubleshooting", + "github": "en/getting-started/setup-troubleshooting.md", + "value": null + } ] }, { - "title": "Architecture", - "slug": "en+arch", - "github": "en/architecture.md", + "title": "Storing and retrieving data", + "slug": "en+store", + "github": 
"en/store/store.md", "value": null, "posts": [ - { - "title": "The Message Pool", - "slug": "en+mpool", - "github": "en/mpool.md", - "value": null - } + { + "title": "Making storage deals", + "slug": "en+making-deals", + "github": "en/store/making-deals.md", + "value": null + }, + { + "title": "Adding data from IPFS", + "slug": "en+adding-from-ipfs", + "github": "en/store/adding-from-ipfs.md", + "value": null + }, + { + "title": "Retrieving data", + "slug": "en+retriving", + "github": "en/store/retrieve.md", + "value": null + }, + { + "title": "Storage Troubleshooting", + "slug": "en+storage-troubleshooting", + "github": "en/store/storage-troubleshooting.md", + "value": null + } ] }, { - "title": "Storage Mining", + "title": "Storage mining", "slug": "en+mining", - "github": "en/mining.md", + "github": "en/mining/mining.md", "value": null, "posts": [ - { - "title": "Lotus Worker", - "slug": "en+lotus-worker", - "github": "en/mining-lotus-worker.md", - "value": null - }, - { - "title": "Static Ports", - "slug": "en+setting-a-static-port", - "github": "en/setting-a-static-port.md", - "value": null - }, - { - "title": "Mining Troubleshooting", - "slug": "en+mining-troubleshooting", - "github": "en/mining-troubleshooting.md", - "value": null - } + { + "title": "Miner setup", + "slug": "en+miner-setup", + "github": "en/mining/miner-setup.md", + "value": null + }, + { + "title": "Managing deals", + "slug": "en+managing-deals", + "github": "en/mining/managing-deals.md", + "value": null + }, + { + "title": "Lotus Worker", + "slug": "en+lotus-worker", + "github": "en/mining/lotus-seal-worker.md", + "value": null + }, + { + "title": "Benchmarking GPUs", + "slug": "en+gpus", + "github": "en/mining/gpus.md", + "value": null + }, + { + "title": "Mining Troubleshooting", + "slug": "en+mining-troubleshooting", + "github": "en/mining/mining-troubleshooting.md", + "value": null + } ] }, { - "title": "Storing Data", - "slug": "en+storing-data", - "github": "en/storing-data.md", + "title": "Building", + "slug": "en+building", + "github": "en/building/building.md", "value": null, "posts": [ - { - "title": "Storage Troubleshooting", - "slug": "en+storing-data-troubleshooting", - "github": "en/storing-data-troubleshooting.md", - "value": null - }, - { - "title": "Information for Miners", - "slug": "en+info-for-miners", - "github": "en/miner-deals.md", - "value": null - }, - { - "title": "IPFS Integration", - "slug": "en+ipfs-client-integration", - "github": "en/storing-ipfs-integration.md", - "value": null - } + { + "title": "Setting up remote API access", + "slug": "en+remote-api", + "github": "en/building/remote-api.md", + "value": null, + "posts": [] + }, + { + "title": "API endpoints and methods", + "slug": "en+api", + "github": "en/building/api.md", + "value": null, + "posts": [] + }, + { + "title": "API Reference", + "slug": "en+api-methods", + "github": "en/building/api-methods.md", + "value": null, + "posts": [] + }, + + { + "title": "Payment Channels", + "slug": "en+payment-channels", + "github": "en/building/payment-channels.md", + "value": null, + "posts": [] + }, + + { + "title": "Running a local devnet", + "slug": "en+local-devnet", + "github": "en/building/local-devnet.md", + "value": null, + "posts": [] + }, + { + "title": "Jaeger Tracing", + "slug": "en+jaeger-tracing", + "github": "en/building/jaeger-tracing.md", + "value": null, + "posts": [] + }, + + { + "title": "API Troubleshooting", + "slug": "en+api-troubleshooting", + "github": "en/building/api-troubleshooting.md", + "value": null, + 
"posts": [] + } ] }, { - "title": "Retrieving Data", - "slug": "en+retrieving-data", - "github": "en/retrieving-data.md", - "value": null, - "posts": [] - }, - { - "title": "Payment Channels", - "slug": "en+payment-channels", - "github": "en/payment-channels.md", - "value": null, - "posts": [] - }, - { - "title": "Command Line Interface", - "slug": "en+cli", - "github": "en/cli.md", - "value": null, - "posts": [] - }, - { - "title": "API", - "slug": "en+api", - "github": "en/api.md", + "title": "Lotus Architecture (WIP)", + "slug": "en+arch", + "github": "en/architectiure/architecture.md", "value": null, "posts": [ - { - "title": "Remote API Support", - "slug": "en+api-scripting-support", - "github": "en/api-scripting-support.md", - "value": null - }, - { - "title": "API Methods", - "slug": "en+api-methods", - "github": "en/api-methods.md", - "value": null - }, - { - "title": "API Troubleshooting", - "slug": "en+api-troubleshooting", - "github": "en/api-troubleshooting.md", - "value": null - } - ] - }, - { - "title": "Developer Tools", - "slug": "en+dev-tools", - "github": "en/dev-tools.md", - "value": null, - "posts": [ - { - "title": "Setup Local Devnet", - "slug": "en+setup-local-dev-net", - "github": "en/local-dev-net.md", - "value": null, - "posts": [] - }, - { - "title": "Jaeger Tracing", - "slug": "en+dev-tools-jaeger-tracing", - "github": "en/dev-tools-jaeger-tracing.md", - "value": null, - "posts": [] - } + { + "title": "The Message Pool", + "slug": "en+mpool", + "github": "en/architecture/mpool.md", + "value": null + } ] }, { @@ -224,7 +217,7 @@ "github": "en/.glossary.json", "value": null, "custom": { - "glossary": true + "glossary": true }, "posts": [] } diff --git a/documentation/en/about.md b/documentation/en/about.md new file mode 100644 index 000000000..ee8536ac9 --- /dev/null +++ b/documentation/en/about.md @@ -0,0 +1,14 @@ +# Lotus + +Lotus is an implementation of the **Filecoin Distributed Storage Network**. + +The **Lotus Node** (and the mining applications) can be built to join any of the [Filecoin networks](https://docs.filecoin.io/how-to/networks/). + +For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://filecoin-project.github.io/specs/). + +## What can I learn here? + +* How to [install](en+installation) and [setup](en+setup) the Lotus software +* How to [store data on the Filecoin network](en+store) +* How to [setup a high performance FIL miner](en+miner-setup) +* How to [configure and access Lotus APIs](en+remote-api) diff --git a/documentation/en/api-scripting-support.md b/documentation/en/api-scripting-support.md deleted file mode 100644 index 653f144ed..000000000 --- a/documentation/en/api-scripting-support.md +++ /dev/null @@ -1,25 +0,0 @@ -# Remote API Support - -You may want to delegate the work **Lotus Miner** or **Lotus Node** performs to other machines. -Here is how to setup the necessary authorization and environment variables. - -## Environment variables - -Environmental variables are variables that are defined for the current shell and are inherited by any child shells or processes. Environmental variables are used to pass information into processes that are spawned from the shell. - -Using the [JWT you generated](https://lotu.sh/en+api#how-do-i-generate-a-token-18865), you can assign it and the **multiaddr** to the appropriate environment variable. 
- -```sh -# Lotus Node -FULLNODE_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/1234/http" - -# Lotus Miner -MINER_API_INFO="JWT_TOKEN:/ip4/127.0.0.1/tcp/2345/http" -``` - -You can also use `lotus auth api-info --perm admin` to quickly create _API_INFO env vars - -- The **Lotus Node**'s `mutliaddr` is in `~/.lotus/api`. -- The default token is in `~/.lotus/token`. -- The **Lotus Miner**'s `multiaddr` is in `~/.lotusminer/config`. -- The default token is in `~/.lotusminer/token`. diff --git a/documentation/en/api.md b/documentation/en/api.md deleted file mode 100644 index 9760e2f32..000000000 --- a/documentation/en/api.md +++ /dev/null @@ -1,85 +0,0 @@ -# API - -Here is an early overview of how to make API calls. - -Implementation details for the **JSON-RPC** package are [here](https://github.com/filecoin-project/go-jsonrpc). - -## Overview: How do you modify the config.toml to change the API endpoint? - -API requests are made against `127.0.0.1:1234` unless you modify `.lotus/config.toml`. - -Options: - -- `http://[api:port]/rpc/v0` - HTTP endpoint -- `ws://[api:port]/rpc/v0` - Websocket endpoint -- `PUT http://[api:port]/rest/v0/import` - File import, it requires write permissions. - -## What methods can I use? - -For now, you can look into different files to find methods available to you based on your needs: - -- [Both Lotus node + miner APIs](https://github.com/filecoin-project/lotus/blob/master/api/api_common.go) -- [Lotus node API](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go) -- [Lotus miner API](https://github.com/filecoin-project/lotus/blob/master/api/api_storage.go) - -The necessary permissions for each are in [api/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/struct.go). - -## How do I make an API request? - -To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go). - -```go -ChainHead(context.Context) (*types.TipSet, error) -``` - -And create a CURL command. In this command, `ChainHead` is included as `{ "method": "Filecoin.ChainHead" }`: - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` - -If the request requires authorization, add an authorization header: - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $(cat ~/.lotusminer/token)" \ - --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` - -> In the future we will add a playground to make it easier to build and experiment with API requests. - -## CURL authorization - -To authorize your request, you will need to include the **JWT** in a HTTP header, for example: - -```sh --H "Authorization: Bearer $(cat ~/.lotusminer/token)" -``` - -Admin token is stored in `~/.lotus/token` for the **Lotus Node** or `~/.lotusminer/token` for the **Lotus Miner**. - -## How do I generate a token? - -To generate a JWT with custom permissions, use this command: - -```sh -# Lotus Node -lotus auth create-token --perm admin - -# Lotus Miner -lotus-miner auth create-token --perm admin -``` - -## What authorization level should I use? - -When viewing [api/apistruct/struct.go](https://github.com/filecoin-project/lotus/blob/master/api/apistruct/struct.go), you will encounter these types: - -- `read` - Read node state, no private data. 
-- `write` - Write to local store / chain, and `read` permissions. -- `sign` - Use private keys stored in wallet for signing, `read` and `write` permissions. -- `admin` - Manage permissions, `read`, `write`, and `sign` permissions. diff --git a/documentation/en/architecture.md b/documentation/en/architecture/architecture.md similarity index 99% rename from documentation/en/architecture.md rename to documentation/en/architecture/architecture.md index 619e04f05..8c4d7be5c 100644 --- a/documentation/en/architecture.md +++ b/documentation/en/architecture/architecture.md @@ -6,7 +6,7 @@ Filecoin protocol, validating the blocks and state transitions. The specification for the Filecoin protocol can be found [here](https://filecoin-project.github.io/specs/). For information on how to setup and operate a Lotus node, -please follow the instructions [here](https://lotu.sh/en+getting-started). +please follow the instructions [here](en+getting-started). # Components diff --git a/documentation/en/mpool.md b/documentation/en/architecture/mpool.md similarity index 100% rename from documentation/en/mpool.md rename to documentation/en/architecture/mpool.md diff --git a/documentation/en/api-methods.md b/documentation/en/building/api-methods.md similarity index 100% rename from documentation/en/api-methods.md rename to documentation/en/building/api-methods.md diff --git a/documentation/en/api-troubleshooting.md b/documentation/en/building/api-troubleshooting.md similarity index 100% rename from documentation/en/api-troubleshooting.md rename to documentation/en/building/api-troubleshooting.md diff --git a/documentation/en/building/api.md b/documentation/en/building/api.md new file mode 100644 index 000000000..626193ee2 --- /dev/null +++ b/documentation/en/building/api.md @@ -0,0 +1,38 @@ +# API endpoints and methods + +The API can be accessed on: + +- `http://[api:port]/rpc/v0` - HTTP RPC-API endpoint +- `ws://[api:port]/rpc/v0` - Websocket RPC-API endpoint +- `PUT http://[api:port]/rest/v0/import` - REST endpoint for file import (multipart upload). It requires write permissions. + +The RPC methods can be found in the [Reference](en+api-methods) and directly in the source code: + +- [Both Lotus node + miner APIs](https://github.com/filecoin-project/lotus/blob/master/api/api_common.go) +- [Lotus node API](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go) +- [Lotus miner API](https://github.com/filecoin-project/lotus/blob/master/api/api_storage.go) + + +## JSON-RPC client + +Lotus uses its own Go library implementation of [JSON-RPC](https://github.com/filecoin-project/go-jsonrpc). + +## cURL example + +To demonstrate making an API request, we will take the method `ChainHead` from [api/api.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go). + +```go +ChainHead(context.Context) (*types.TipSet, error) +``` + +And create a CURL command. In this command, `ChainHead` is included as `{ "method": "Filecoin.ChainHead" }`: + +```sh +curl -X POST \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $(cat ~/.lotusminer/token)" \ + --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \ + 'http://127.0.0.1:1234/rpc/v0' +``` + +(See [this section](en+remote-api) to learn how to generate authorization tokens). 
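For a quick end-to-end check, token creation and the RPC call can be combined in one shell session. The sketch below assumes a local Lotus node listening on the default `127.0.0.1:1234` API port and uses `Filecoin.Version`, a simple read-only method, as the example call:

```sh
# Create a token limited to read permissions...
TOKEN=$(lotus auth create-token --perm read)

# ...and pass it as a bearer token when calling a read-only method.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $TOKEN" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.Version", "params": [], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```

The same pattern works for any method in the [Reference](en+api-methods), provided the token's permissions cover it.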
diff --git a/documentation/en/building/building.md b/documentation/en/building/building.md new file mode 100644 index 000000000..5194f8314 --- /dev/null +++ b/documentation/en/building/building.md @@ -0,0 +1,5 @@ +# Building with Lotus + +Lotus applications provide HTTP (JSON-RPC) APIs that allow developers to control Lotus programatically. + +This section dives into how to setup and use these APIs, additionally providing information on advanced Lotus features and workflows, like Payment Channels or how to setup a fully local Lotus development network. diff --git a/documentation/en/dev-tools-jaeger-tracing.md b/documentation/en/building/jaeger-tracing.md similarity index 100% rename from documentation/en/dev-tools-jaeger-tracing.md rename to documentation/en/building/jaeger-tracing.md diff --git a/documentation/en/local-dev-net.md b/documentation/en/building/local-devnet.md similarity index 100% rename from documentation/en/local-dev-net.md rename to documentation/en/building/local-devnet.md diff --git a/documentation/en/payment-channels.md b/documentation/en/building/payment-channels.md similarity index 100% rename from documentation/en/payment-channels.md rename to documentation/en/building/payment-channels.md diff --git a/documentation/en/building/remote-api.md b/documentation/en/building/remote-api.md new file mode 100644 index 000000000..d0fedb51b --- /dev/null +++ b/documentation/en/building/remote-api.md @@ -0,0 +1,69 @@ +# Setting up remote API access + +The **Lotus Miner** and the **Lotus Node** applications come with their own local API endpoints setup by default when they are running. + +These endpoints are used by `lotus` and `lotus-miner` to interact with the running process. In this section we will explain how to enable remote access to the Lotus APIs. + +Note that instructions are the same for `lotus` and `lotus-miner`. For simplicity, we will just show how to do it with `lotus`. + +## Setting the listening interface for the API endpoint + +By default, the API listens on the local "loopback" interface (`127.0.0.1`). This is configured in the `config.toml` file: + +```toml +[API] +# ListenAddress = "/ip4/127.0.0.1/tcp/1234/http" +# RemoteListenAddress = "" +# Timeout = "30s" +``` + +To access the API remotely, Lotus needs to listen on the right IP/interface. The IP associated to each interface can be usually found with the command `ip a`. Once the right IP is known, it can be set in the configuration: + +```toml +[API] +ListenAddress = "/ip4//tcp/3453/http" # port is an example + +# Only relevant for lotus-miner +# This should be the IP:Port pair where the miner is reachable from anyone trying to dial to it. +# If you have placed a reverse proxy or a NAT'ing device in front of it, this may be different from +# the EXTERNAL_INTERFACE_IP. +RemoteListenAddress = "" +``` + +> `0.0.0.0` can be used too. This is a wildcard that means "all interfaces". Depending on the network setup, this may affect security (listening on the wrong, exposed interface). + +After making these changes, please restart the affected process. + +## Issuing tokens + +Any client wishing to talk to the API endpoints will need a token. Tokens can be generated with: + +```sh +lotus auth create-token --perm +``` + +(similarly for the Lotus Miner). + +The permissions work as follows: + +- `read` - Read node state, no private data. +- `write` - Write to local store / chain, and `read` permissions. +- `sign` - Use private keys stored in wallet for signing, `read` and `write` permissions. 
+- `admin` - Manage permissions, `read`, `write`, and `sign` permissions. + + +Tokens can then be used in applications by setting an Authorization header as: + +``` +Authorization: Bearer +``` + + +## Environment variables + +`lotus`, `lotus-miner` and `lotus-worker` can actually interact with their respective applications running on a different node. All is needed to configure them are the following the *environment variables*: + +```sh +FULLNODE_API_INFO="TOKEN:/ip4//tcp//http" +MINER_API_INFO="TOKEN:/ip4//tcp//http" +``` diff --git a/documentation/en/cli.md b/documentation/en/cli.md deleted file mode 100644 index fd26400d0..000000000 --- a/documentation/en/cli.md +++ /dev/null @@ -1,108 +0,0 @@ -# Lotus Command Line Interface - -The Command Line Interface (CLI) is a convenient way to interact with -a Lotus node. You can use the CLI to operate your node, -get information about the blockchain, -manage your accounts and transfer funds, -create storage deals, and much more! - -The CLI is intended to be self-documenting, so when in doubt, simply add `--help` -to whatever command you're trying to run! This will also display all of the -input parameters that can be provided to a command. - -We highlight some of the commonly -used features of the CLI below. -All CLI commands should be run from the home directory of the Lotus project. - -## Operating a Lotus node - -### Starting up a node - -```sh -lotus daemon -``` -This command will start up your Lotus node, with its API port open at 1234. -You can pass `--api=` to use a different port. - -### Checking your sync progress - -```sh -lotus sync status -``` -This command will print your current tipset height under `Height`, and the target tipset height -under `Taregt`. - -You can also run `lotus sync wait` to get constant updates on your sync progress. - -### Getting the head tipset - -```sh -lotus chain head -``` - -### Control the logging level - -```sh -lotus log set-level -``` -This command can be used to toggle the logging levels of the different -systems of a Lotus node. In decreasing order -of logging detail, the levels are `debug`, `info`, `warn`, and `error`. - -As an example, -to set the `chain` and `blocksync` to log at the `debug` level, run -`lotus log set-level --system chain --system blocksync debug`. - -To see the various logging system, run `lotus log list`. - -### Find out what version of Lotus you're running - -```sh -lotus version -``` - -## Managing your accounts - -### Listing accounts in your wallet - -```sh -lotus wallet list -``` - -### Creating a new account - -```sh -lotus wallet new bls -``` -This command will create a new BLS account in your wallet; these -addresses start with the prefix `t3`. Running `lotus wallet new secp256k1` -(or just `lotus wallet new`) will create -a new Secp256k1 account, which begins with the prefix `t1`. - -### Getting an account's balance - -```sh -lotus wallet balance
-``` - -### Transferring funds - -```sh -lotus send --source=<source address> <destination address> <amount> -``` -This command will transfer `amount` (in attoFIL) from `source address` to `destination address`. - -### Importing an account into your wallet - -```sh -lotus wallet import <path> -``` -This command will import an account whose private key is saved at the specified file. - -### Exporting an account from your wallet - -```sh -lotus wallet export <address>
-``` -This command will print out the private key of the specified address -if it is in your wallet. Always be careful with your private key! diff --git a/documentation/en/dev-tools.md b/documentation/en/dev-tools.md deleted file mode 100644 index 60b9b26d4..000000000 --- a/documentation/en/dev-tools.md +++ /dev/null @@ -1,3 +0,0 @@ -# Developer Tools - -> Running a local network can be a great way to understand how Lotus works and test your setup. diff --git a/documentation/en/environment-vars.md b/documentation/en/environment-vars.md deleted file mode 100644 index 9d455a74d..000000000 --- a/documentation/en/environment-vars.md +++ /dev/null @@ -1,65 +0,0 @@ -# Lotus Environment Variables - -## Building - -## Common - -The environment variables are common across most lotus binaries. - -### `LOTUS_FD_MAX` - -Sets the file descriptor limit for the process. This should be set high (8192 -or higher) if you ever notice 'too many open file descriptor' errors. - -### `LOTUS_JAEGER` - -This can be set to enable jaeger trace reporting. The value should be the url -of the jaeger trace collector, the default for most jaeger setups should be -`localhost:6831`. - -### `LOTUS_DEV` - -If set to a non-empty value, certain parts of the application will print more -verbose information to aid in development of the software. Not recommended for -end users. - -## Lotus Daemon - -### `LOTUS_PATH` - -Sets the location for the lotus daemon on-disk repo. If left empty, this defaults to `~/.lotus`. - -### `LOTUS_SKIP_GENESIS_CHECK` - -Can be set to `_yes_` if you wish to run a lotus network with a different -genesis than the default one built into your lotus binary. - -### `LOTUS_CHAIN_TIPSET_CACHE` - -Sets the cache size for the chainstore tipset cache. The default value is 8192, -but if your usage of the lotus API involves frequent arbitrary tipset lookups, -you may want to increase this. - -### `LOTUS_CHAIN_INDEX_CACHE` - -Sets the cache size for the chainstore epoch index cache. The default value is 32768, -but if your usage of the lotus API involves frequent deep chain lookups for -block heights that are very far from the current chain height, you may want to -increase this. - - -### `LOTUS_BSYNC_MSG_WINDOW` - -Set the initial maximum window size for message fetching blocksync requests. If -you have a slower internet connection and are having trouble syncing, you might -try lowering this down to 10-20 for a 'poor' internet connection. - -## Lotus Miner - -A number of environment variables are respected for configuring the behavior of the filecoin proving subsystem. For more details on those [see here](https://github.com/filecoin-project/rust-fil-proofs/#settings). - -### `LOTUS_MINER_PATH` - -Sets the location for the lotus miners on-disk repo. If left empty, this defaults to `~/.lotusminer`. - - diff --git a/documentation/en/faqs.md b/documentation/en/faqs.md index c2d526830..74119a5b6 100644 --- a/documentation/en/faqs.md +++ b/documentation/en/faqs.md @@ -11,7 +11,6 @@ go [here](https://filecoin.io/faqs/). Lotus is an implementation of the **Filecoin Distributed Storage Network**, written in Go. It is designed to be modular and interoperable with any other implementation of the Filecoin Protocol. -More information about Lotus can be found [here](https://lotu.sh/). ### What are the components of Lotus? @@ -30,21 +29,19 @@ to a Lotus Node over the JSON-RPC API. ### How do I set up a Lotus Node? -Follow the instructions found [here](https://lotu.sh/en+getting-started). 
+Follow the instructions found [here](en+install) and [here](en+setup). ### Where can I get the latest version of Lotus? -Download the binary tagged as the `Latest Release` from the - [Lotus Github repo](https://github.com/filecoin-project/lotus/releases). +Download the binary tagged as the `Latest Release` from the [Lotus Github repo](https://github.com/filecoin-project/lotus/releases) or checkout the `master` branch of the source repository. ### What operating systems can Lotus run on? -Lotus can build and run on most Linux and MacOS systems with at least -8GB of RAM. Windows is not yet supported. +Lotus can build and run on most Linux and MacOS systems with [at least 8GB of RAM](en+install#hardware-requirements-1). Windows is not yet supported. ### How can I update to the latest version of Lotus? -To update Lotus, follow the instructions [here](https://lotu.sh/en+updating-lotus). +To update Lotus, follow the instructions [here](en+update). ### How do I prepare a fresh installation of Lotus? @@ -52,7 +49,7 @@ Stop the Lotus daemon, and delete all related files, including sealed and chain running `rm ~/.lotus ~/.lotusminer`. Then, install Lotus afresh by following the instructions -found [here](https://lotu.sh/en+getting-started). +found [here](en+install). ### Can I configure where the node's config and data goes? @@ -73,48 +70,45 @@ directory for more. ### How can I send a request over the JSON-RPC API? Information on how to send a `cURL` request to the JSON-RPC API can be found -[here](https://lotu.sh/en+api). A JavaScript client is under development. +[here](en+api). ### What are the requests I can send over the JSON-RPC API? -Please have a look at the -[source code](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go) -for a list of methods supported by the JSON-RPC API. +Please have a look [here](en+api). + + ## The Test Network ### What is Testnet? Testnet is a live network of Lotus Nodes run by the community for testing purposes. - It has 2 PiB of storage (and growing!) dedicated to it. ### Is FIL on the Testnet worth anything? -Nothing at all! Real-world incentives may be provided in a future phase of Testnet, but this is -yet to be confirmed. +Nothing at all! ### How can I see the status of Testnet? The [dashboard](https://stats.testnet.filecoin.io/) displays the status of the network as -well as a ton -of other metrics you might find interesting. +well as a ton of other metrics you might find interesting. ## Mining with a Lotus Node on Testnet ### How do I get started mining with Lotus? -Follow the instructions found [here](https://lotu.sh/en+mining). +Follow the instructions found [here](en+mining). ### What are the minimum hardware requirements? An example test configuration, and minimum hardware requirements can be found -[here](https://lotu.sh/en+hardware-mining). +[here](en+install#hardware-requirements-8). Note that these might NOT be the minimum requirements for mining on Mainnet. ### What are some GPUs that have been tested? -A list of benchmarked GPUs can be found [here](https://lotu.sh/en+hardware-mining#benchmarked-gpus-7393). +See previous question. ### Why is my GPU not being used when sealing a sector? @@ -135,4 +129,4 @@ You can do so by changing the storage path variable for the second miner, e.g., ### How do I setup my own local devnet? -Follow the instructions found [here](https://lotu.sh/en+setup-local-dev-net). +Follow the instructions found [here](en+local-devnet). 
diff --git a/documentation/en/getting-started.md b/documentation/en/getting-started.md deleted file mode 100644 index e38a2ab97..000000000 --- a/documentation/en/getting-started.md +++ /dev/null @@ -1,23 +0,0 @@ -# Lotus - -Lotus is an implementation of the **Filecoin Distributed Storage Network**. You can run the Lotus software client to join the **Filecoin Testnet**. - -For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://filecoin-project.github.io/specs/). - -## What can I learn here? - -- How to install Lotus on [Arch Linux](https://lotu.sh/en+install-lotus-arch), [Ubuntu](https://lotu.sh/en+install-lotus-ubuntu), or [MacOS](https://lotu.sh/en+install-lotus-macos). -- Joining the [Lotus Testnet](https://lotu.sh/en+join-testnet). -- [Storing](https://lotu.sh/en+storing-data) or [retrieving](https://lotu.sh/en+retrieving-data) data. -- Mining Filecoin using the **Lotus Miner** in your [CLI](https://lotu.sh/en+mining). - -## How is Lotus designed? - -Lotus is architected modularly to keep clean API boundaries while using the same process. Installing Lotus will include two separate programs: - -- The **Lotus Node** -- The **Lotus Miner** - -The **Lotus Miner** is intended to be run on the machine that manages a single miner instance, and is meant to communicate with the **Lotus Node** via the websocket **JSON-RPC** API for all of the chain interaction needs. - -This way, a mining operation may easily run a **Lotus Miner** or many of them, connected to one or many **Lotus Node** instances. diff --git a/documentation/en/getting-started/getting-started.md b/documentation/en/getting-started/getting-started.md new file mode 100644 index 000000000..99b4095d4 --- /dev/null +++ b/documentation/en/getting-started/getting-started.md @@ -0,0 +1,3 @@ +# Getting started + +This section will get you started with Lotus. We will setup the Lotus daemon (that should already be [installed](en+install)), start it, create a wallet and use it to send and receive some Filecoin. diff --git a/documentation/en/setup-troubleshooting.md b/documentation/en/getting-started/setup-troubleshooting.md similarity index 60% rename from documentation/en/setup-troubleshooting.md rename to documentation/en/getting-started/setup-troubleshooting.md index a1c78b51b..f27a3faa5 100644 --- a/documentation/en/setup-troubleshooting.md +++ b/documentation/en/getting-started/setup-troubleshooting.md @@ -1,5 +1,12 @@ # Setup Troubleshooting + +## Error: initializing node error: cbor input had wrong number of fields + +This happens when you are starting Lotus which has been compiled for one network, but it encounters data in the Lotus data folder which is for a different network, or for an older incompatible version. + +The solution is to clear the data folder (see below). + ## Config: Clearing data Here is a command that will delete your chain data, stored wallets, stored data and any miners you have set up: @@ -8,7 +15,7 @@ Here is a command that will delete your chain data, stored wallets, stored data rm -rf ~/.lotus ~/.lotusminer ``` -This command usually resolves any issues with running `lotus` but it is not always required for updates. We will share information about when resetting your chain data and miners is required for an update in the future. +Note you do not always need to clear your data for [updating](en+update). ## Error: Failed to connect bootstrap peer @@ -33,6 +40,8 @@ ERROR hello hello/hello.go:81 other peer has different genesis! 
## Config: Open files limit +Lotus will attempt to set up the file descriptor (FD) limit automatically. If that does not work, you can still configure your system to allow higher than the default values. + On most systems you can check the open files limit with: ```sh @@ -44,3 +53,5 @@ You can also modify this number by using the `ulimit` command. It gives you the ```sh ulimit -n 10000 ``` + +Note that this is not persisted and that systemd manages its own FD limits for services. Please use your favourite search engine to find instructions on how to persist and configure FD limits for your system. diff --git a/documentation/en/getting-started/setup.md b/documentation/en/getting-started/setup.md new file mode 100644 index 000000000..e751da80b --- /dev/null +++ b/documentation/en/getting-started/setup.md @@ -0,0 +1,169 @@ +# Setting up Lotus + +Your Lotus binaries have been installed and you are ready to start participating in the Filecoin network. + +## Selecting the right network + +You should have built the Lotus binaries from the right Github branch and Lotus will be fully setup to join the matching [Filecoin network](https://docs.filecoin.io/how-to/networks/). For more information on switching networks, check the [updating Lotus section](en+update). + +## Starting the daemon + +To start the daemon simply run: + +```sh +lotus daemon +``` + +or if you are using the provided systemd service files, do: + +```sh +systemctl start lotus-daemon +``` + +__If you are using Lotus from China__, make sure you set the following environment variable before running Lotus: + +``` +export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" +``` + + +During the first start, Lotus: + +* Will setup its data folder at `~/.lotus` +* Will download the necessary parameters +* Start syncing the Lotus chain + +If you started lotus using systemd, the logs will appear in `/var/log/lotus/daemon.log` (not in journalctl as usual), otherwise you will see them in your screen. + +Do not be appalled by the amount of warnings and sometimes errors showing in the logs, there are usually part of the usual functioning of the daemon as part of a distributed network. + +## Waiting to sync + +After the first start, the chain will start syncing until it has reached the tip. You can check how far the syncing process is with: + +```sh +lotus sync status +``` + +You can also interactively wait for the chain to be fully synced with: + +```sh +lotus sync wait +``` + +## Interacting with the Lotus daemon + +As shown above, the `lotus` command allows to interact with the running daemon. You will see it getting used in many of the documentation examples. + +This command-line-interface is self-documenting: + +```sh +# Show general help +lotus --help +# Show specific help for the "client" subcommand +lotus client --help +``` + +For example, after your Lotus daemon has been running for a few minutes, use `lotus` to check the number of other peers that it is connected to in the Filecoin network: + +```sh +lotus net peers +``` + +## Controlling the logging level + +```sh +lotus log set-level +``` +This command can be used to toggle the logging levels of the different +systems of a Lotus node. In decreasing order +of logging detail, the levels are `debug`, `info`, `warn`, and `error`. + +As an example, +to set the `chain` and `blocksync` to log at the `debug` level, run +`lotus log set-level --system chain --system blocksync debug`. + +To see the various logging system, run `lotus log list`. 
+ + +## Configuration + +### Configuration file + +The Lotus daemon stores a configuration file in `~/.lotus/config.toml`. Note that by default all settings are commented. Here is an example configuration: + +```toml +[API] + # Binding address for the Lotus API + ListenAddress = "/ip4/127.0.0.1/tcp/1234/http" + # Not used by lotus daemon + RemoteListenAddress = "" + # General network timeout value + Timeout = "30s" + +# Libp2p provides connectivity to other Filecoin network nodes +[Libp2p] + # Binding address swarm - 0 means random port. + ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"] + # Insert any addresses you want to explicitally + # announce to other peers here. Otherwise, they are + # guessed. + AnnounceAddresses = [] + # Insert any addresses to avoid announcing here. + NoAnnounceAddresses = [] + # Connection manager settings, decrease if your + # machine is overwhelmed by connections. + ConnMgrLow = 150 + ConnMgrHigh = 180 + ConnMgrGrace = "20s" + +# Pubsub is used to broadcast information in the network +[Pubsub] + Bootstrapper = false + RemoteTracer = "/dns4/pubsub-tracer.filecoin.io/tcp/4001/p2p/QmTd6UvR47vUidRNZ1ZKXHrAFhqTJAD27rKL9XYghEKgKX" + +# This section can be used to enable adding and retriving files from IPFS +[Client] + UseIpfs = false + IpfsMAddr = "" + IpfsUseForRetrieval = false + +# Metrics configuration +[Metrics] + Nickname = "" + HeadNotifs = false +``` + +### Ensuring connectivity to your Lotus daemon + +Usually your lotus daemon will establish connectivity with others in the network and try to make itself diallable using uPnP. If you wish to manually ensure that your daemon is reachable: + +* Set a fixed port of your choice in the `ListenAddresses` in the Libp2p section (i.e. 6665). +* Open a port in your router that is forwarded to this port. This is usually called featured as "Port forwarding" and the instructions differ from router model to model but there are many guides online. +* Add your public IP/port to `AnnounceAddresses`. i.e. `/ip4//tcp/6665/`. + +Note that it is not a requirement to use Lotus as a client to the network to be fully reachable, as your node already connects to others directly. + + +### Environment variables + +Common to most Lotus binaries: + +* `LOTUS_FD_MAX`: Sets the file descriptor limit for the process +* `LOTUS_JAEGER`: Sets the Jaeger URL to send traces. See TODO. +* `LOTUS_DEV`: Any non-empty value will enable more verbose logging, useful only for developers. + +Specific to the *Lotus daemon*: + +* `LOTUS_PATH`: Location to store Lotus data (defaults to `~/.lotus`). +* `LOTUS_SKIP_GENESIS_CHECK=_yes_`: Set only if you wish to run a lotus network with a different genesis block. +* `LOTUS_CHAIN_TIPSET_CACHE`: Sets the size for the chainstore tipset cache. Defaults to `8192`. Increase if you perform frequent arbitrary tipset lookups. +* `LOTUS_CHAIN_INDEX_CACHE`: Sets the size for the epoch index cache. Defaults to `32768`. Increase if you perform frequent deep chain lookups for block heights far from the latest height. +* `LOTUS_BSYNC_MSG_WINDOW`: Set the initial maximum window size for message fetching blocksync request. Set to 10-20 if you have an internet connection with low bandwidth. + +Specific to the *Lotus miner*: + +* `LOTUS_MINER_PATH`: Location for the miner's on-disk repo. Defaults to `./lotusminer`. +* A number of environment variables are respected for configuring the behaviour of the Filecoin proving subsystem. [See here](en+miner-setup). 
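As an illustrative sketch (the paths and values below are placeholders, not recommendations), several of these variables can be combined in the shell that launches the daemon:

```sh
# Hypothetical example: custom repo location, a higher file descriptor limit
# and a larger tipset cache, set before starting the daemon in the same shell.
export LOTUS_PATH=/mnt/data/lotus
export LOTUS_FD_MAX=100000
export LOTUS_CHAIN_TIPSET_CACHE=16384
lotus daemon
```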
+ + diff --git a/documentation/en/getting-started/wallet.md b/documentation/en/getting-started/wallet.md new file mode 100644 index 000000000..25a67fb09 --- /dev/null +++ b/documentation/en/getting-started/wallet.md @@ -0,0 +1,58 @@ +# Obtaining and sending FIL + +In order to receive and send FIL with Lotus you will need to have installed the program and be running the Lotus daemon. + +## Creating a wallet + + +```sh +lotus wallet new bls +``` + +This will print your Filecoin address. + +Your wallet information is stored in the `~/.lotus/keystore` (or `$LOTUS_PATH/keystore`). For instructions on export/import, see below. + +You can create multiple wallets and list them with: + +```sh +lotus wallet list +``` + +## Obtaining FIL + +FIL can be obtained either by using one of the Faucets (available for the test networks) or by buying it from an exchange supporting FIL trading (once mainnet has launched). + +Once you have received some FIL you can check your balance with: + +```sh +lotus wallet balance +``` + +Remember that your will only see the latest balance when your daemon is fully synced to the chain. + +## Sending FIL + +Sending some FIL can be achieved by running: + +```sh +lotus wallet send
+``` + +Make sure to check `lotus wallet send --help` for additional options. + +## Exporting and importing a wallet + +You can export and re-import a wallet with: + +```sh +lotus wallet export <address>
> wallet.private +``` + +and: + +```sh +lotus wallet import wallet.private +``` + +Keep your wallet's private key safe! diff --git a/documentation/en/hardware-mining.md b/documentation/en/hardware-mining.md deleted file mode 100644 index d421f6fb1..000000000 --- a/documentation/en/hardware-mining.md +++ /dev/null @@ -1,54 +0,0 @@ -# Protocol Labs Standard Testing Configuration - -> This documentation page describes the standard testing configuration the Protocol Labs team has used to test **Lotus Miner**s on Lotus. There is no guarantee this testing configuration will be suitable for Filecoin storage mining at MainNet launch. If you need to buy new hardware to join the Filecoin Testnet, we recommend to buy no more hardware than you require for testing. To learn more please read this [Protocol Labs Standard Testing Configuration post](https://filecoin.io/blog/filecoin-testnet-mining/). - -**Sector sizes** and **minimum pledged storage** required to mine blocks are two very important Filecoin Testnet parameters that impact hardware decisions. We will continue to refine all parameters during Testnet. - -BECAUSE OF THIS, OUR STANDARD TESTING CONFIGURATION FOR FILECOIN MAINNET CAN AND WILL CHANGE. YOU HAVE BEEN WARNED. - -## Example configuration - -The setup below is a minimal example for sealing 32 GiB sectors on Lotus: - -- 2 TB of hard drive space. -- 8 core CPU -- 128 GiB of RAM - -Note that 1GB sectors don't require as high of specs, but are likely to be removed as we improve the performance of 32GB sector sealing. - -For the first part of the sealing process, AMD CPU's are __highly recommended__, because of the `Intel SHA Extensions` instruction set that is available there ever since the `Zen` microarchitecture. Hence, AMD CPU's seem to perform much better on the testnet than other CPU's. Contrary to what the name implies, this extended instruction set is not available on recent Intel desktop/server chips. - -## Testnet discoveries - -- If you only have 128GiB of ram, enabling 256GB of **NVMe** swap on an SSD will help you avoid out-of-memory issues while mining. - -## Benchmarked GPUs - -GPUs are a must for getting **block rewards**. Here are a few that have been confirmed to generate **SNARKs** quickly enough to successfully mine blocks on the Lotus Testnet. - -- GeForce RTX 2080 Ti -- GeForce RTX 2080 SUPER -- GeForce RTX 2080 -- GeForce GTX 1080 Ti -- GeForce GTX 1080 -- GeForce GTX 1060 - -## Testing other GPUs - -If you want to test a GPU that is not explicitly supported, use the following global **environment variable**: - -```sh -BELLMAN_CUSTOM_GPU=":" -``` - -Here is an example of trying a GeForce GTX 1660 Ti with 1536 cores. - -```sh -BELLMAN_CUSTOM_GPU="GeForce GTX 1660 Ti:1536" -``` - -To get the number of cores for your GPU, you will need to check your card’s specifications. - -## Benchmarking - -Here is a [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench) and a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694) for those who wish to experiment with and contribute hardware setups for the **Filecoin Testnet**. diff --git a/documentation/en/hardware.md b/documentation/en/hardware.md deleted file mode 100644 index f6250548a..000000000 --- a/documentation/en/hardware.md +++ /dev/null @@ -1,7 +0,0 @@ -# Hardware - -> This page is a work in progress. Exact mining requirements are still in the works. 
- -Lotus can build and run on most [Linux](https://ubuntu.com/) and [MacOS](https://www.apple.com/macos) systems with at least 8GiB of RAM. - -Windows is not yet supported. diff --git a/documentation/en/install-lotus-arch.md b/documentation/en/install-lotus-arch.md deleted file mode 100644 index 8e06aae4e..000000000 --- a/documentation/en/install-lotus-arch.md +++ /dev/null @@ -1,51 +0,0 @@ -# Arch Linux Instructions - -These steps will install the following dependencies: - -- go (1.14 or higher) -- gcc (7.4.0 or higher) -- git (version 2 or higher) -- bzr (some go dependency needs this) -- jq -- pkg-config -- opencl-icd-loader -- opencl driver (like nvidia-opencl on arch) (for GPU acceleration) -- opencl-headers (build) -- rustup (proofs build) -- llvm (proofs build) -- clang (proofs build) - -### Install dependencies - -```sh -sudo pacman -Syu opencl-icd-loader gcc git bzr jq pkg-config opencl-icd-loader opencl-headers -``` - -### Install Go 1.14 - -Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install). - -### Clone the Lotus repository - -```sh -git clone https://github.com/filecoin-project/lotus.git -cd lotus/ -``` - -### Build the Lotus binaries from source and install - -! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively** - -```sh -make clean && make all -sudo make install -``` - -#### Native Filecoin FFI building - -```sh -env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all -sudo make install -``` - -After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet). diff --git a/documentation/en/install-lotus-fedora.md b/documentation/en/install-lotus-fedora.md deleted file mode 100644 index c37161b7a..000000000 --- a/documentation/en/install-lotus-fedora.md +++ /dev/null @@ -1,54 +0,0 @@ -# Fedora Instructions - -> tested on 30 - -**NOTE:** If you have an AMD GPU the opencl instructions may be incorrect... - -These steps will install the following dependencies: - -- go (1.14 or higher) -- gcc (7.4.0 or higher) -- git (version 2 or higher) -- bzr (some go dependency needs this) -- jq -- pkg-config -- rustup (proofs build) -- llvm (proofs build) -- clang (proofs build) - -### Install dependencies - -```sh -$ sudo dnf -y update -$ sudo dnf -y install gcc git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm -$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -``` - -### Install Go 1.14 - -Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install). - -### Clone the Lotus repository - -```sh -git clone https://github.com/filecoin-project/lotus.git -cd lotus/ -``` - -### Build the Lotus binaries from source and install - -! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively** - -```sh -$ make clean && make all -$ sudo make install -``` - -#### Native Filecoin FFI building - -```sh -env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all -sudo make install -``` - -After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus TestNet](https://lotu.sh/en+join-testnet). 
diff --git a/documentation/en/install-lotus-ubuntu.md b/documentation/en/install-lotus-ubuntu.md deleted file mode 100644 index 500650692..000000000 --- a/documentation/en/install-lotus-ubuntu.md +++ /dev/null @@ -1,54 +0,0 @@ -# Ubuntu Instructions - -These steps will install the following dependencies: - -- go (1.14 or higher) -- gcc (7.4.0 or higher) -- git (version 2 or higher) -- bzr (some go dependency needs this) -- jq -- pkg-config -- opencl-icd-loader -- opencl driver (like nvidia-opencl on arch) (for GPU acceleration) -- opencl-headers (build) -- rustup (proofs build) -- llvm (proofs build) -- clang (proofs build) - -### Install dependencies - -```sh -sudo apt update -sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl -sudo apt upgrade -``` - -### Install Go 1.14 - -Install the latest version of Go by following [the docs on their website](https://golang.org/doc/install). - -### Clone the Lotus repository - -```sh -git clone https://github.com/filecoin-project/lotus.git -cd lotus/ -``` - -### Build the Lotus binaries from source and install - -! **If you are running an AMD platform or if your CPU supports SHA extensions you will want to build the Filecoin proofs natively** - -```sh -make clean && make all -sudo make install -``` - -#### Native Filecoin FFI building - -```sh -env env RUSTFLAGS="-C target-cpu=native -g" FFI_BUILD_FROM_SOURCE=1 make clean deps all -sudo make install -``` - - -After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet). diff --git a/documentation/en/install-systemd-services.md b/documentation/en/install-systemd-services.md deleted file mode 100644 index fbde1feec..000000000 --- a/documentation/en/install-systemd-services.md +++ /dev/null @@ -1,145 +0,0 @@ -# Use Lotus with systemd - -Lotus is capable of running as a systemd service daemon. You can find installable service files for systemd in the [lotus repo scripts directory](https://github.com/filecoin-project/lotus/tree/master/scripts) as files with `.service` extension. In order to install these service files, you can copy these `.service` files to the default systemd unit load path. - -The services expect their binaries to be present in `/usr/local/bin/`. You can use `make` to install them by running: - -```sh -$ sudo make install -``` - -for `lotus` and `lotus-storage-miner` and - -```sh -$ sudo make install-chainwatch -``` - -for the `chainwatch` tool. - -## Installing services via `make` - -If your host uses the default systemd unit load path, the `lotus-daemon` and `lotus-miner` services can be installed by running: - -```sh -$ sudo make install-services -``` - -To install the the `lotus-chainwatch` service run: - -```sh -$ sudo make install-chainwatch-service -``` - -You can install all services together by running: - -```sh -$ sudo make install-all-services -``` - -The `lotus-daemon` and the `lotus-miner` services can be installed individually too by running: - -```sh -$ sudo make install-daemon-service -``` - -and - -```sh -$ sudo make install-miner-service -``` - -### Notes - -When installing the `lotus-miner` and/or `lotus-chainwatch` service the `lotus-daemon` service gets automatically installed since the other two services depend on it being installed to run. - -All `install-*-service*` commands will install the latest binaries in the lotus build folders to `/usr/local/bin/`. 
If you do not want to use the latest build binaries please copy the `*.service` files by hand. - -## Removing via `make` - -All services can beremoved via `make`. To remove all services together run: - -```sh -$ sudo make clean-all-services -``` - -Individual services can be removed by running: - -```sh -$ sudo make clean-chainwatch-services -$ sudo make clean-miner-services -$ sudo make clean-daemon-services -``` - -### Notes - -The services will be stoppend and disabled when removed. - -Removing the `lotus-daemon` service will automatically remove the depending services `lotus-miner` and `lotus-chainwatch`. - - -## Controlling services - -All service can be controlled with the `systemctl`. A few basic control commands are listed below. To get detailed infos about the capabilities of the `systemctl` command please consult your distributions man pages by running: - -```sh -$ man systemctl -``` - -### Start/Stop services - -You can start the services by running: - -```sh -$ sudo systemctl start lotus-daemon -$ sudo systemctl start lotus-miner -$ sudo systemctl start lotus-chainwatch -``` - -and can be stopped by running: - -```sh -$ sudo systemctl stop lotus-daemon -$ sudo systemctl stop lotus-miner -$ sudo systemctl stop lotus-chainwatch -``` - -### Enabling services on startup - -To enable the services to run automatically on startup execute: - -```sh -$ sudo systemctl enable lotus-daemon -$ sudo systemctl enable lotus-miner -$ sudo systemctl enable lotus-chainwatch -``` - -To disable the services on startup run: - -```sh -$ sudo systemctl disable lotus-daemon -$ sudo systemctl disable lotus-miner -$ sudo systemctl disable lotus-chainwatch -``` -### Notes - -Systemd will not let services be enabled or started without their requirements. Starting the `lotus-chainwatch` and/or `lotus-miner` service with automatically start the `lotus-daemon` service (if installed!). Stopping the `lotus-daemon` service will stop the other two services. The same pattern is executed for enabling and disabling the services. - -## Interacting with service logs - -Logs from the services can be reviewed using `journalctl`. - -### Follow logs from a specific service unit - -```sh -$ sudo journalctl -u lotus-daemon -f -``` - -### View logs in reverse order - -```sh -$ sudo journalctl -u lotus-miner -r -``` - -### Log files - -Besides the systemd service logs all services save their own log files in `/var/log/lotus/`. diff --git a/documentation/en/installation/install-linux.md b/documentation/en/installation/install-linux.md new file mode 100644 index 000000000..6fe12996e --- /dev/null +++ b/documentation/en/installation/install-linux.md @@ -0,0 +1,129 @@ +# Linux installation + +This page will show you the steps to build and install Lotus in your Linux computer. + +## Dependencies + +### System dependencies + +First of all, building Lotus will require installing some system dependencies, usually provided by your distribution. 
+ +For Arch Linux: + +```sh +sudo pacman -Syu opencl-icd-loader gcc git bzr jq pkg-config opencl-icd-loader opencl-headers +``` + +For Ubuntu: + +```sh +sudo apt update +sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl +sudo apt upgrade +``` + +For Fedora: + +```sh +sudo dnf -y update +sudo dnf -y install gcc git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm +``` + +For OpenSUSE: + +```sh +sudo zypper in gcc git jq make libOpenCL1 opencl-headers ocl-icd-devel clang llvm +sudo ln -s /usr/lib64/libOpenCL.so.1 /usr/lib64/libOpenCL.so +``` + +### Rustup + +Lotus needs [rustup](https://rustup.rs/): + +```sh +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +Please make sure your `$PATH` variable is correctly configured after the rustup installation so that `cargo` and `rustc` are found in their rustup-configured locations. + +### Go + +To build lotus you will need a working installation of **[Go1.14](https://golang.org/dl/)**. Follow the [installation instructions](https://golang.org/doc/install), which generally amount to: + +```sh +# Example! Check the installation instructions. +wget -c https://dl.google.com/go/go1.14.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +``` + +## Build and install Lotus + +With all the above, you are ready to build and install the Lotus suite (`lotus`, `lotus-miner` and `lotus-worker`): + +```sh +git clone https://github.com/filecoin-project/lotus.git +cd lotus/ +``` + +__IF YOU ARE IN CHINA__, set `export GOPROXY=https://goproxy.cn` before building + +Now, choose the network that you will be joining: + +* For `testnet`: `git checkout master` +* For `nerpa`: `git checkout ntwk-nerpa` +* For `butterfly`: `git checkout ntwk-butterfly` + +Once on the right branch, do: + +```sh +make clean install +sudo make install +``` + +This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`. `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets...). `lotus-miner` will use `$HOME/.lotusminer` respectively. See the *environment variables* section below for how to customize these. + +> Remeber to [move your Lotus folder](en+update) if you are switching between different networks, or there has been a network reset. + + +### Native Filecoin FFI + +Some newer processors (AMD Zen (and later), Intel Ice Lake) have support SHA extensions. To make full use of your processor's capabilities, make sure you set the following variables BEFORE building from source (as described above): + +```sh +export RUSTFLAGS="-C target-cpu=native -g" +export FFI_BUILD_FROM_SOURCE=1 +``` + +> __NOTE__: This method of building does not produce portable binaries! Make sure you run the binary in the same machine as you built it. + +### systemd service files + +Lotus provides Systemd service files. They can be installed with: + +```sh +make install-daemon-service +make install-miner-service +``` + +After that, you should be able to control Lotus using `systemctl`. + +## Troubleshooting + +This section mentions some of the common pitfalls for building Lotus. Check the [getting started](en+getting-started) section for more tips on issues when running the lotus daemon. + +### Build errors + +Please check the build logs closely. 
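+As a first sanity check (a minimal sketch, assuming `go`, `rustc` and `pkg-config` are already on your `PATH`), verify that your toolchain versions match the requirements above, since an outdated Go or Rust installation is a common cause of build failures:
+
+```sh
+go version           # Lotus needs Go 1.14 or newer
+rustc --version      # installed via rustup
+pkg-config --version
+```
+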
If you have a dirty state in your git branch make sure to:
+
+```sh
+git checkout
+git reset origin/ --hard
+make clean
+```
+
+### Slow builds from China
+
+Users from China can speed up their builds by setting:
+
+```sh
+export GOPROXY=https://goproxy.cn
+```
diff --git a/documentation/en/install-lotus-macos.md b/documentation/en/installation/install-macos.md
similarity index 85%
rename from documentation/en/install-lotus-macos.md
rename to documentation/en/installation/install-macos.md
index 371832c96..ea9ecb8ca 100644
--- a/documentation/en/install-lotus-macos.md
+++ b/documentation/en/installation/install-macos.md
@@ -59,4 +59,4 @@ make clean && make all
 sudo make install
 ```
 
-After installing Lotus, you can run the `lotus` command directly from your CLI to see usage documentation. Next, you can join the [Lotus Testnet](https://lotu.sh/en+join-testnet).
+After installing Lotus, you will be ready to [set up and run the daemon](en+setup.md).
diff --git a/documentation/en/installation/installation.md b/documentation/en/installation/installation.md
new file mode 100644
index 000000000..98534da92
--- /dev/null
+++ b/documentation/en/installation/installation.md
@@ -0,0 +1,39 @@
+# Installation
+
+Lotus can be installed on [Linux](en+install-linux) and [MacOS](en+install-macos) machines by building it from source. Windows is not supported yet.
+
+This section contains guides to install Lotus on the supported platforms.
+
+Lotus is made of 3 binaries:
+
+* `lotus`: the main [Lotus node](en+setup) (Lotus client)
+* `lotus-miner`: an application specifically for [Filecoin mining](en+miner-setup)
+* `lotus-worker`: an additional [application to offload some heavy-processing tasks](en+lotus-worker) from the Lotus Miner.
+
+These applications are written in Go, but also import several Rust libraries. Lotus does not distribute
+pre-compiled builds.
+
+## Hardware requirements
+
+### For client nodes
+
+* 8GiB of RAM
+* Recommended for syncing speed: CPU with support for *Intel SHA Extensions* (AMD since Zen microarchitecture, Intel since Ice Lake).
+* Recommended for speed: SSD hard drive (the bigger the better)
+
+### For miners
+
+The following correspond to the latest testing configuration:
+
+* 2 TB of hard drive space
+* 8 core CPU
+* 128 GiB of RAM with 256 GiB of NVMe SSD storage for swap (or simply, more RAM).
+* Recommended for speed: CPU with support for *Intel SHA Extensions* (AMD since Zen microarchitecture, Intel since Ice Lake).
+* GPU for block mining. The following have been [confirmed to be fast enough](en+gpus):
+
+- GeForce RTX 2080 Ti
+- GeForce RTX 2080 SUPER
+- GeForce RTX 2080
+- GeForce GTX 1080 Ti
+- GeForce GTX 1080
+- GeForce GTX 1060
diff --git a/documentation/en/installation/update.md b/documentation/en/installation/update.md
new file mode 100644
index 000000000..5d76592c9
--- /dev/null
+++ b/documentation/en/installation/update.md
@@ -0,0 +1,72 @@
+# Updating and restarting Lotus
+
+Updating Lotus is as simple as rebuilding and re-installing the software as explained in the previous sections.
+
+You can verify which version of Lotus you are running with:
+
+```sh
+lotus version
+```
+
+Make sure that you `git pull` the branch that corresponds to the network that your Lotus daemon is using:
+
+```sh
+git pull origin
+make clean
+make all
+sudo make install # if necessary
+```
+
+Finally, restart the Lotus Node and/or Lotus Miner(s).
+
+__CAVEAT__: If you are running miners, check whether your miner is safe to shut down and restart with `lotus-miner proving info`.
If any deadline shows a block height in the past, do not restart: + +In the following example, Deadline Open is 454 which is earlier than Current Epoch of 500. This miner should **not** be shut down or restarted. + +``` +$ sudo lotus-miner proving info +Miner: t01001 +Current Epoch: 500 +Proving Period Boundary: 154 +Proving Period Start: 154 (2h53m0s ago) +Next Period Start: 3034 (in 21h7m0s) +Faults: 768 (100.00%) +Recovering: 768 +Deadline Index: 5 +Deadline Sectors: 0 +Deadline Open: 454 (23m0s ago) +Deadline Close: 514 (in 7m0s) +Deadline Challenge: 434 (33m0s ago) +Deadline FaultCutoff: 384 (58m0s ago) +``` + +In this next example, the miner can be safely restarted because no Deadlines are earlier than Current Epoch of 497. You have ~45 minutes before the miner must be back online to declare faults (FaultCutoff). If the miner has no faults, you have about an hour. + +``` +$ sudo lotus-miner proving info +Miner: t01000 +Current Epoch: 497 +Proving Period Boundary: 658 +Proving Period Start: 658 (in 1h20m30s) +Next Period Start: 3538 (in 25h20m30s) +Faults: 0 (0.00%) +Recovering: 0 +Deadline Index: 0 +Deadline Sectors: 768 +Deadline Open: 658 (in 1h20m30s) +Deadline Close: 718 (in 1h50m30s) +Deadline Challenge: 638 (in 1h10m30s) +Deadline FaultCutoff: 588 (in 45m30s) +``` + +## Switching networks and network resets + +If you wish to switch to a different lotus network or there has been a network reset, you will need to: + +* Checkout the appropiate repository branch and rebuild +* Ensure you do not mix Lotus data (`LOTUS_PATH`, usually `~/.lotus`) from a previous or different network. For this, either: + * Rename the folder to something else or, + * Set a different `LOTUS_PATH` for the new network. +* Same for `~/.lotusminer` if you are running a miner. + +Note that deleting the Lotus data folder will wipe all the chain data, wallets and configuration, so think twice before taking any non-reversible action. diff --git a/documentation/en/join-testnet.md b/documentation/en/join-testnet.md deleted file mode 100644 index 6660d26d8..000000000 --- a/documentation/en/join-testnet.md +++ /dev/null @@ -1,93 +0,0 @@ -# Join Testnet - -## Introduction - -Anyone can set up a **Lotus Node** and connect to the **Lotus Testnet**. This is the best way to explore the current CLI and the **Filecoin Decentralized Storage Market**. - -## Note: Using the Lotus Node from China - -If you are trying to use `lotus` from China. You should set this **environment variable** on your machine: - -```sh -export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" -``` - -## Get started - -Start the **daemon** using the default configuration in `./build`: - -```sh -lotus daemon -``` - -In another terminal window, check your connection with peers: - -```sh -lotus net peers | wc -l -``` - -In order to connect to the network, you need to be connected to at least 1 peer. If you’re seeing 0 peers, read our [troubleshooting notes](https://lotu.sh/en+setup-troubleshooting). - -Make sure that you have a reasonable "open files limit" set on your machine, such as 10000. If you're seeing a lower value, such as 256 (default on macOS), read our [troubleshooting notes](https://lotu.sh/en+setup-troubleshooting) on how to update it prior to starting the Lotus daemon. - -## Chain sync - -While the daemon is running, the next requirement is to sync the chain. Run the command below to view the chain sync progress. To see current chain height, visit the [network stats page](https://stats.testnet.filecoin.io/). 
- -```sh -lotus sync wait -``` - -- This step will take anywhere between a few hours to a couple of days. -- You will be able to perform **Lotus Testnet** operations after it is finished. - -## Create your first address - -Initialize a new wallet: - -```sh -lotus wallet new -``` - -Sometimes your operating system may limit file name length to under 150 characters. You need to use a file system that supports long filenames. - -Here is an example of the response: - -```sh -t1aswwvjsae63tcrniz6x5ykvsuotlgkvlulnqpsi -``` - -- Visit the [faucet](http://spacerace.faucet.glif.io/) to add funds. -- Paste the address you created under REQUEST. -- Press the Request button. - -## Check wallet address balance - -Wallet balances in the Lotus Testnet are in **FIL**, the smallest denomination of FIL is an **attoFil**, where 1 attoFil = 10^-18 FIL. - -```sh -lotus wallet balance -``` - -You will not see any attoFIL in your wallet if your **chain** is not fully synced. - -## Send FIL to another wallet - -To send FIL to another wallet from your default account, use this command: - -``` -lotus send -``` - -## Configure your node's connectivity - -To effectively accept incoming storage & retrieval deals, your Lotus node needs to be accessible to other nodes on the network. To improve your connectivity, be sure to: - -- [Set the multiaddresses for you miner to listen on](https://docs.filecoin.io/mine/connectivity/#setting-multiaddresses) -- [Maintain a healthy peer count](https://docs.filecoin.io/mine/connectivity/#checking-peer-count) -- [Enable port forwarding](https://docs.filecoin.io/mine/connectivity/#port-forwarding) -- [Configure your public IP address and port](https://docs.filecoin.io/mine/connectivity/#setting-a-public-ip-address) - -## Monitor the dashboard - -To see the latest network activity, including **chain block height**, **block height**, **blocktime**, **total network power**, largest **block producer miner**, check out the [monitoring dashboard](https://stats.testnet.filecoin.io). diff --git a/documentation/en/miner-deals.md b/documentation/en/miner-deals.md deleted file mode 100644 index 0aee0e1af..000000000 --- a/documentation/en/miner-deals.md +++ /dev/null @@ -1,39 +0,0 @@ -# Information for Miners - -Here is how a miner can get set up to accept storage deals. The first step is -to install a Lotus node and sync to the top of the chain. - -## Set up an ask - -``` -lotus-miner set-price -``` - -This command will set up your miner to accept deal proposals that meet the input price. -The price is inputted in FIL per GiB per epoch, and the default is 0.0000000005. - -## Ensure you can be discovered - -Clients need to be able to find you in order to make storage deals with you. -While there isn't necessarily anything you need to do to become discoverable, here are some things you can -try to check that people can connect to you. - -To start off, make sure you are connected to at least some peers, and your port is -open and working. - -### Connect to your own node - -If you are in contact with someone else running Lotus, you can ask them to try connecting -to your node. To do so, provide them your peer ID, which you can get by running `lotus net id` on -your node. - -They can then try running `lotus net findpeer ` to get your address(es), and can then -run `lotus net connect
` to connect to you. If successful, your node will now -appear on their peers list (run `lotus net peers` to check). - -You can also check this by running a second instance of Lotus yourself. - -### Query your own ask - -A client should be able to find your ask by running `lotus client query-ask `. If -someone is not able to retrieve your ask by doing so, then there is an issue with your node. \ No newline at end of file diff --git a/documentation/en/mining.md b/documentation/en/mining.md deleted file mode 100644 index 32c3c51d2..000000000 --- a/documentation/en/mining.md +++ /dev/null @@ -1,149 +0,0 @@ -# Storage Mining - -Here are instructions to learn how to perform storage mining. For hardware specifications please read [this](https://lotu.sh/en+hardware-mining). - -It is useful to [join the Testnet](https://lotu.sh/en+join-testnet) prior to attempting storage mining for the first time. - -## Note: Using the Lotus Miner from China - -If you are trying to use `lotus-miner` from China. You should set this **environment variable** on your machine. - -```sh -export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" -``` - -## Get started - -Please ensure that at least one **BLS address** (starts with `t3`) in your wallet exists with the following command: - -```sh -lotus wallet list -``` - -If you do not have a bls address, create a new bls wallet: - -```sh -lotus wallet new bls -``` - -With your wallet address: - -- Visit the [faucet](http://spacerace.faucet.glif.io/) -- Paste the address you created under REQUEST. -- Press the Request button. -- Run `/lotus-miner init --owner= --worker=` - -You will have to wait some time for this operation to complete. - -## Mining - -To mine: - -```sh -lotus-miner run -``` - -If you are downloading **Filecoin Proof Parameters**, the download can take some time. - -Get information about your miner: - -```sh -lotus-miner info -# example: miner id `t0111` -``` - -**Seal** random data to start producing **PoSts**: - -```sh -lotus-miner sectors pledge -``` - -- Warning: On Linux configurations, this command will write data to `$TMPDIR` which is not usually the largest partition. You should point the value to a larger partition if possible. - -Get **miner power** and **sector usage**: - -```sh -lotus state power -# returns total power - -lotus state power - -lotus state sectors -``` - -## Performance tuning - -### `FIL_PROOFS_MAXIMIZE_CACHING=1` Environment variable - -This env var can be used with `lotus-miner`, `lotus-worker`, and `lotus-bench` to make the precommit1 step faster at the cost of some memory use (1x sector size) - -### `FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1` Environment variable - -This env var can be used with `lotus-miner`, `lotus-worker`, and `lotus-bench` to enable experimental precommit2 GPU acceleration - -### Setting multiaddresses - -Set multiaddresses for the miner to listen on in a miner's `config.toml` file -(by default, it is located at `~/.lotusminer/config.toml`). The `ListenAddresses` in this file should be interface listen addresses (usually `/ip4/0.0.0.0/tcp/PORT`), and the `AnnounceAddresses` should match the addresses being passed to `set-addrs`. - -The addresses passed to `set-addrs` parameter in the commands below should be currently active and dialable; confirm they are before entering them. - -Once the config file has been updated, set the on-chain record of the miner's listen addresses: - -``` -lotus-miner actor set-addrs ... 
-``` - -This updates the `MinerInfo` object in the miner's actor, which will be looked up -when a client attempts to make a deal. Any number of addresses can be provided. - -Example: - -``` -lotus-miner actor set-addrs /ip4/123.123.73.123/tcp/12345 /ip4/223.223.83.223/tcp/23456 -``` - -# Separate address for windowPoSt messages - -WindowPoSt is the mechanism through which storage is verified in Filecoin. It requires miners to submit proofs for all sectors every 24h, which require sending messages to the chain. - -Because many other mining related actions require sending messages to the chain, and not all of those are "high value", it may be desirable to use a separate account to send PoSt messages from. This allows for setting lower GasFeeCaps on the lower value messages without creating head-of-line blocking problems for the PoSt messages in congested chain conditions - -To set this up, first create a new account, and send it some funds for gas fees: -```sh -lotus wallet new bls -t3defg... - -lotus send t3defg... 100 -``` - -Next add the control address -```sh -lotus-miner actor control set t3defg... -Add t3defg... -Pass --really-do-it to actually execute this action -``` - -Now actually set the addresses -```sh -lotus-miner actor control set --really-do-it t3defg... -Add t3defg... -Message CID: bafy2.. -``` - -Wait for the message to land on chain -```sh -lotus state wait-msg bafy2.. -... -Exit Code: 0 -... -``` - -Check miner control address list to make sure the address was correctly setup -```sh -lotus-miner actor control list -name ID key use balance -owner t01111 t3abcd... other 300 FIL -worker t01111 t3abcd... other 300 FIL -control-0 t02222 t3defg... post 100 FIL -``` diff --git a/documentation/en/mining/gpus.md b/documentation/en/mining/gpus.md new file mode 100644 index 000000000..ad0ed4f66 --- /dev/null +++ b/documentation/en/mining/gpus.md @@ -0,0 +1,17 @@ +# Benchmarking additional GPUs + +If you want to test a GPU that is not explicitly supported, set the following *environment variable*: + +```sh +BELLMAN_CUSTOM_GPU=":" +``` + +Here is an example of trying a GeForce GTX 1660 Ti with 1536 cores. + +```sh +BELLMAN_CUSTOM_GPU="GeForce GTX 1660 Ti:1536" +``` + +To get the number of cores for your GPU, you will need to check your card’s specifications. + +To perform the benchmark you can use Lotus' [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench). Results and discussion are tracked in a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694). diff --git a/documentation/en/mining-lotus-worker.md b/documentation/en/mining/lotus-seal-worker.md similarity index 60% rename from documentation/en/mining-lotus-worker.md rename to documentation/en/mining/lotus-seal-worker.md index f93780c44..62dde9bfb 100644 --- a/documentation/en/mining-lotus-worker.md +++ b/documentation/en/mining/lotus-seal-worker.md @@ -2,24 +2,14 @@ The **Lotus Worker** is an extra process that can offload heavy processing tasks from your **Lotus Miner**. The sealing process automatically runs in the **Lotus Miner** process, but you can use the Worker on another machine communicating over a fast network to free up resources on the machine running the mining process. -## Note: Using the Lotus Worker from China +## Installation -If you are trying to use `lotus-worker` from China. 
You should set this **environment variable** on your machine: - -```sh -export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" -``` - -## Get Started - -Make sure that the `lotus-worker` is compiled and installed by running: - -```sh -make lotus-worker -``` +The `lotus-worker` application is installed along with the others when running `sudo make install` as shown in the [Installation section](en+install-linux). For simplicity, we recommend following the same procedure in the machines that will run the Lotus Workers (even if the Lotus miner and the Lotus daemon are not used there). ## Setting up the Miner +### Allow external connections to the miner API + First, you will need to ensure your `lotus-miner`'s API is accessible over the network. To do this, open up `~/.lotusminer/config.toml` (Or if you manually set `LOTUS_MINER_PATH`, look under that directory) and look for the API field. @@ -32,30 +22,49 @@ ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" RemoteListenAddress = "127.0.0.1:2345" ``` -To make your node accessible over the local area network, you will need to determine your machines IP on the LAN, and change the `127.0.0.1` in the file to that address. +To make your node accessible over the local area network, you will need to determine your machine's IP on the LAN (`ip a`), and change the `127.0.0.1` in the file to that address. -A more permissive and less secure option is to change it to `0.0.0.0`. This will allow anyone who can connect to your computer on that port to access the [API](https://lotu.sh/en+api). They will still need an auth token. +A more permissive and less secure option is to change it to `0.0.0.0`. This will allow anyone who can connect to your computer on that port to access the miner's API, though they will still need an auth token. `RemoteListenAddress` must be set to an address which other nodes on your network will be able to reach. -Next, you will need to [create an authentication token](https://lotu.sh/en+api-scripting-support#generate-a-jwt-46). All Lotus APIs require authentication tokens to ensure your processes are as secure against attackers attempting to make unauthenticated requests to them. +### Create an authentication token -### Connect the Lotus Worker +Write down the output of: -On the machine that will run `lotus-worker`, set the `MINER_API_INFO` environment variable to `TOKEN:MINER_NODE_MULTIADDR`. Where `TOKEN` is the token we created above, and `NIMER_NODE_MULTIADDR` is the `multiaddr` of the **Lotus Miner** API that was set in `config.toml`. +```sh +lotus-miner auth api-info --perm admin +``` -Once this is set, run: +The Lotus Workers will need this token to connect to the miner. + +## Connecting the Lotus Workers + +On each machine that will run the `lotus-worker` application you will need to define the following *environment variable*: + +```sh +export MINER_API_INFO::/ip4//tcp/2345` +``` + +If you are trying to use `lotus-worker` from China. You should additionally set: + +```sh +export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" +``` + + +Once that is done, you can run the Worker with: ```sh lotus-worker run ``` -If you are running multiple workers on the same host, you will need to specify the `--listen` flag and ensure each worker is on a different port. +> If you are running multiple workers on the same host, you will need to specify the `--listen` flag and ensure each worker is on a different port. 
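+As a concrete illustration (a sketch only: the token and addresses below are made-up placeholders, not real values), a typical worker setup looks like this:
+
+```sh
+# Hypothetical values for illustration: use the token printed by
+# `lotus-miner auth api-info --perm admin` and the IP/port configured
+# in the miner's config.toml.
+export MINER_API_INFO="eyJhbGciOi...example-token...:/ip4/192.168.1.10/tcp/2345/http"
+lotus-worker run --listen 0.0.0.0:3456   # pick a unique port per worker on the same host
+```
+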
-To check that the **Lotus Worker** is connected to your **Lotus Miner**, run `lotus-miner sealing workers` and check that the remote worker count has increased.
+On your Lotus miner, check that the workers are correctly connected:
 
 ```sh
-why@computer ~/lotus> lotus-miner sealing workers
+lotus-miner sealing workers
 Worker 0, host computer
  CPU:  [                                                            ] 0 core(s) in use
  RAM:  [||||||||||||||||||                                          ] 28% 18.1 GiB/62.7 GiB
@@ -69,9 +78,10 @@ Worker 1, host othercomputer
  GPU: GeForce RTX 2080, not used
 ```
 
-### Running locally for manually managing process priority
+## Running locally for manually managing process priority
 
 You can also run the **Lotus Worker** on the same machine as your **Lotus Miner**, so you can manually manage the process priority.
+
 To do so you have to first __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes.
 
 You can then run the miner on your local-loopback interface;
diff --git a/documentation/en/mining/managing-deals.md b/documentation/en/mining/managing-deals.md
new file mode 100644
index 000000000..5f73a6a2d
--- /dev/null
+++ b/documentation/en/mining/managing-deals.md
@@ -0,0 +1,19 @@
+# Managing deals
+
+
+While the Lotus Miner is running as a daemon, the `lotus-miner` application can be used to manage and configure the miner:
+
+
+```sh
+lotus-miner storage-deals --help
+```
+
+Running the above command will show the different options related to deals. For example, `lotus-miner storage-deals set-ask` allows you to set the price for storage that your miner uses to respond to ask requests from clients.
+
+If deals are ongoing, you can check the data transfers with:
+
+```sh
+lotus-miner data-transfers list
+```
+
+Make sure you explore the `lotus-miner` CLI. Every command is self-documented and takes a `--help` flag that offers specific information about it.
diff --git a/documentation/en/mining/miner-setup.md b/documentation/en/mining/miner-setup.md
new file mode 100644
index 000000000..cafa1e7b1
--- /dev/null
+++ b/documentation/en/mining/miner-setup.md
@@ -0,0 +1,241 @@
+# Miner setup
+
+This page will guide you through all you need to know to successfully run a **Lotus Miner**. Before proceeding, remember that you should be running the Lotus daemon on a fully synced chain.
+
+## Performance tweaks
+
+This is a list of performance tweaks to consider before starting the miner:
+
+### Building
+
+As [explained already](en+install-linux#native-filecoin-ffi-10), you should have exported the following variables before building the Lotus applications:
+
+```sh
+export RUSTFLAGS="-C target-cpu=native -g"
+export FFI_BUILD_FROM_SOURCE=1
+```
+
+### Environment
+
+For high performance mining, we recommend setting the following variables in your environment so that they are available when running any of the Lotus applications:
+
+```sh
+# See https://github.com/filecoin-project/bellman
+export BELLMAN_CPU_UTILIZATION=0.875
+
+# See https://github.com/filecoin-project/rust-fil-proofs/
+export FIL_PROOFS_MAXIMIZE_CACHING=1 # More speed at RAM cost (1x sector-size of RAM - 32 GB).
+export FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1 # precommit2 GPU acceleration +export FIL_PROOFS_USE_GPU_TREE_BUILDER=1 +``` + +IF YOU ARE RUNNING FROM CHINA: + +```sh +export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" +``` + +IF YOUR MINER RUNS IN A DIFFERENT MACHINE AS THE LOTUS DAEMON: + +```sh +export FULLNODE_API_INFO=:/ip4//tcp//http +``` + +If you will be using systemd service files to run the Lotus daemon and miner, make sure you include these variables manually in the service files. + +### Adding swap + +If you have only 128GiB of RAM, you will need to make sure your system provides at least an extra 256GiB of fast swap (preferably NVMe SSD): + +```sh +sudo fallocate -l 256G /swapfile +sudo chmod 600 /swapfile +sudo mkswap /swapfile +sudo swapon /swapfile +# show current swap spaces and take note of the current highest priority +swapon --show +# append the following line to /etc/fstab (ensure highest priority) and then reboot +# /swapfile swap swap pri=50 0 0 +sudo reboot +# check a 256GB swap file is automatically mounted and has the highest priority +swapon --show +``` + +## Creating a new BLS wallet + +You will need a BLS wallet (`t3...`) for mining. To create it, if you don't have one already, run: + +```sh +lotus wallet new bls +``` + +Next make sure to [send some funds](en+wallet) to this address so that the miner setup can be completed. + +## Initializing the miner + +> SPACE RACE: +> To participate in the Space race, please register your miner: +> +> - Visit the [faucet](http://spacerace.faucet.glif.io/) +> - Paste the address you created under REQUEST. +> - Press the Request button. + +Now that you have a miner address you can initialize the Lotus Miner: + +```sh +lotus-miner init --owner= --no-local-storage +``` + +* The `--no-local-storage` flag is used so that we configure specific locations for storage later below. +* The init process will download over 100GiB of initialization parameters to /var/tmp/filecoin-proof-parameters. Make sure there is space or set `FIL_PROOFS_PARAMETER_CACHE` to somewhere else. +* The Lotus Miner configuration folder is created at `~/.lotusminer/` or `$LOTUS_MINER_PATH` if set. + +## Reachability + +Before you start your miner, it is __very important__ to configure it so that it is reachable from any peer in the Filecoin network. For this you will need a stable public IP and edit your `~/.lotusminer/config.toml` as follows: + +```toml +... +[Libp2p] + ListenAddresses = ["/ip4/0.0.0.0/tcp/24001"] # choose a fixed port + AnnounceAddresses = ["/ip4//tcp/24001"] # important! +... +``` + +Once you start your miner, make sure you can connect to its public IP/port (you can use `telnet`, `nc` for the task...). If you have an active firewall or some sort, you may need to additionally open ports in it. + + +## Starting the miner + +You are now ready to start your Lotus miner: + +```sh +lotus-miner run +``` + +or if you are using the systemd service file: + +```sh +systemctl start lotus-miner +``` + +> __Do not proceed__ from here until you have verified that your miner not only is running, but also __reachable on its public IP address__. 
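+For example (a rough sketch using a placeholder address), you can probe the announced port from a different machine with the tools mentioned above:
+
+```sh
+# 203.0.113.5 and 24001 are placeholders; use your miner's public IP and configured port
+nc -vz 203.0.113.5 24001
+# or
+telnet 203.0.113.5 24001
+```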
+
+## Publishing the miner addresses
+
+Once the miner is up and running, publish your miner address (which you configured above) on the chain (please ensure it is dialable):
+
+```sh
+lotus-miner actor set-addrs /ip4//tcp/24001
+```
+
+## Setting locations for sealing and long-term storage
+
+If you used the `--no-local-storage` flag during initialization, you can now specify the disk locations for sealing (SSD recommended) and long-term storage (otherwise you can skip this):
+
+```
+lotus-miner storage attach --init --seal
+lotus-miner storage attach --init --store
+lotus-miner storage list
+```
+
+## Pledging sectors
+
+If you would like to compete for block rewards by increasing your power in the network as soon as possible, you can optionally pledge one or several sectors, depending on your storage. It can also be used to test that the sealing process works correctly. Pledging is equivalent to storing random data instead of real data obtained through storage deals.
+
+> Note that pledging sectors to the mainnet network makes most sense when trying to obtain a reasonable amount of total power in the network, thus obtaining real chances to mine new blocks. Otherwise it is only useful for testing purposes.
+
+If you decide to go ahead, then do:
+
+```sh
+lotus-miner sectors pledge
+```
+
+This will write data to `$TMPDIR` so make sure that there is enough space available.
+
+You should check that your sealing job has started with:
+
+```sh
+lotus-miner sealing jobs
+```
+
+This will be accompanied by a file in `/unsealed`.
+
+After some minutes, you can check the sealing progress with:
+
+```sh
+lotus-miner sectors list
+# and
+lotus-miner sealing workers
+```
+
+When sealing for the new sector is complete, `pSet: NO` will become `pSet: YES`.
+
+Once the sealing is finished, you will want to check how long it took your miner to seal this sector and configure the miner accordingly. To find out how long it took, use:
+
+```
+lotus-miner sectors status --log 0
+```
+
+Once you know, you can edit the Miner's `~/.lotusminer/config.toml` accordingly:
+
+```
+...
+[Dealmaking]
+...
+  ExpectedSealDuration = "12h0m0s" # The time it took your miner
+```
+
+You can also take the chance to edit other values, such as `WaitForDealsDelay`, which specifies the delay between accepting the first deal and sealing, allowing you to place multiple deals in the same sector.
+
+Once you are done editing the configuration, [restart your miner](en+update).
+
+If you wish to be able to re-use a pledged sector for real storage deals before the pledged period of 6 months ends, you will need to mark it for upgrade:
+
+```sh
+lotus-miner sectors mark-for-upgrade
+```
+
+The sector should become inactive within 24 hours. From that point, the pledged storage can be re-used to store real data associated with real storage deals.
+
+## Separate address for windowPoSt messages
+
+WindowPoSt is the mechanism through which storage is verified in Filecoin. It requires miners to submit proofs for all sectors every 24h, which require sending messages to the chain.
+
+Because many other mining related actions require sending messages to the chain, and not all of those are "high value", it may be desirable to use a separate account to send PoSt messages from.
This allows for setting lower GasFeeCaps on the lower value messages without creating head-of-line blocking problems for the PoSt messages in congested chain conditions + +To set this up, first create a new account, and send it some funds for gas fees: + +```sh +lotus wallet new bls +t3defg... + +lotus send t3defg... 100 +``` + +Next add the control address: + +```sh +lotus-miner actor control set --really-do-it t3defg... +Add t3defg... +Message CID: bafy2.. +``` + +Wait for the message to land on chain: + +```sh +lotus state wait-msg bafy2.. +... +Exit Code: 0 +... +``` + +Finally, check the miner control address list to make sure the address was correctly setup: + +```sh +lotus-miner actor control list +name ID key use balance +owner t01111 t3abcd... other 300 FIL +worker t01111 t3abcd... other 300 FIL +control-0 t02222 t3defg... post 100 FIL +``` diff --git a/documentation/en/mining-troubleshooting.md b/documentation/en/mining/mining-troubleshooting.md similarity index 90% rename from documentation/en/mining-troubleshooting.md rename to documentation/en/mining/mining-troubleshooting.md index 5aaf9f6ef..758929075 100644 --- a/documentation/en/mining-troubleshooting.md +++ b/documentation/en/mining/mining-troubleshooting.md @@ -25,7 +25,7 @@ lotus-miner info # WARN main lotus-storage-miner/main.go:73 failed to get api endpoint: (/Users/myrmidon/.lotusminer) %!w(*errors.errorString=&{API not running (no endpoint)}): ``` -If you see this, that means your **Lotus Miner** isn't ready yet. You need to finish [syncing the chain](https://lotu.sh/en+join-testnet). +If you see this, that means your **Lotus Miner** isn't ready yet. You need to finish [syncing the chain](en+setup#waiting-to-sync-370). ## Error: Your computer may not be fast enough @@ -57,10 +57,3 @@ make bench This process uses a fair amount of GPU, and generally takes ~4 minutes to complete. If you do not see any activity in nvtop from lotus during the entire process, it is likely something is misconfigured with your GPU. -## Checking Sync Progress - -You can use this command to check how far behind you are on syncing: - -```sh -date -d @$(./lotus chain getblock $(./lotus chain head) | jq .Timestamp) -``` diff --git a/documentation/en/mining/mining.md b/documentation/en/mining/mining.md new file mode 100644 index 000000000..b1b944c6e --- /dev/null +++ b/documentation/en/mining/mining.md @@ -0,0 +1,8 @@ +# Storage Mining + +This section of the documentation explains how to do storage mining with Lotus. Please note that not everyone can do storage mining, and that you should not attempt it on on networks where sector sizes are 32GB+ unless you meet the [hardware requirements](en+install#hardware-requirements-1). + +From this point we assume that you have setup and are running the [Lotus Node](en+setup), that it has fully synced the Filecoin chain and that you are familiar with how to interact with it using the `lotus` command-line interface. + +In order to perform storage mining, apart from the Lotus daemon, you will be additionally interacting with the `lotus-miner` and potentially the `lotus-worker` applications (which you should have [installed](en+install-linux) along the `lotus` application already). + diff --git a/documentation/en/retrieving-data.md b/documentation/en/retrieving-data.md deleted file mode 100644 index 7cb0e31be..000000000 --- a/documentation/en/retrieving-data.md +++ /dev/null @@ -1,27 +0,0 @@ -# Retrieving Data - -> There are recent bug reports with these instructions. 
If you happen to encounter any problems, please create a [GitHub issue](https://github.com/filecoin-project/lotus/issues/new) and a maintainer will address the problem as soon as they can. - -Here are the operations you can perform after you have stored and sealed a **Data CID** with the **Lotus Miner** in the network. - -If you would like to learn how to store a **Data CID** on a miner, read the instructions [here](https://lotu.sh/en+storing-data). - -## Find by Data CID - -```sh -lotus client find -# LOCAL -# RETRIEVAL @-- -``` - -## Retrieve by Data CID - -All fields are required. - -```sh -lotus client retrieve -``` - -If the outfile does not exist it will be created in the Lotus repository directory. - -This command will initiate a **retrieval deal** and write the data to your computer. This process may take 2 to 10 minutes. diff --git a/documentation/en/setting-a-static-port.md b/documentation/en/setting-a-static-port.md deleted file mode 100644 index 97ac6528e..000000000 --- a/documentation/en/setting-a-static-port.md +++ /dev/null @@ -1,54 +0,0 @@ -# Static Ports - -Depending on how your network is set up, you may need to set a static port to successfully connect to peers to perform storage deals with your **Lotus Miner**. - -## Setup - -To change the random **swarm port**, you may edit the `config.toml` file located under `$LOTUS_MINER_PATH`. The default location of this file is `$HOME/.lotusminer`. - -To change the port to `1347`: - -```sh -[Libp2p] - ListenAddresses = ["/ip4/0.0.0.0/tcp/1347", "/ip6/::/tcp/1347"] -``` - -After changing the port value, restart your **daemon**. - -## Announce Addresses - -If the **swarm port** is port-forwarded from another address, it is possible to control what addresses -are announced to the network. - -```sh -[Libp2p] - AnnounceAddresses = ["/ip4//tcp/1347"] -``` - -If non-empty, this array specifies the swarm addresses to announce to the network. If empty, the daemon will announce inferred swarm addresses. - -Similarly, it is possible to set `NoAnnounceAddresses` with an array of addresses to not announce to the network. - -## Ubuntu's Uncomplicated Firewall - -Open firewall manually: - -```sh -ufw allow 1347/tcp -``` - -Or open and modify the profile located at `/etc/ufw/applications.d/lotus-daemon`: - -```sh -[Lotus Daemon] -title=Lotus Daemon -description=Lotus Daemon firewall rules -ports=1347/tcp -``` - -Then run these commands: - -```sh -ufw update lotus-daemon -ufw allow lotus-daemon -``` diff --git a/documentation/en/storing-ipfs-integration.md b/documentation/en/store/adding-from-ipfs.md similarity index 79% rename from documentation/en/storing-ipfs-integration.md rename to documentation/en/store/adding-from-ipfs.md index 041364380..2f6b097cc 100644 --- a/documentation/en/storing-ipfs-integration.md +++ b/documentation/en/store/adding-from-ipfs.md @@ -1,10 +1,10 @@ -# IPFS Integration +# Adding data from IPFS Lotus supports making deals with data stored in IPFS, without having to re-import it into lotus. To enable this integration, you need to have an IPFS daemon running in the background. -Then, open up `~/.lotus/config.toml` (or if you manually set `LOTUS_PATH`, look under that directory) -and look for the Client field, and set `UseIpfs` to `true`. + +Then, open up `~/.lotus/config.toml` (or if you manually set `LOTUS_PATH`, look under that directory) and look for the Client field, and set `UseIpfs` to `true`. 
 ```toml
 [Client]
diff --git a/documentation/en/store/making-deals.md b/documentation/en/store/making-deals.md
new file mode 100644
index 000000000..ca3a47182
--- /dev/null
+++ b/documentation/en/store/making-deals.md
@@ -0,0 +1,71 @@
+# Making storage deals
+
+## Adding a file to Lotus
+
+Before sending data to a Filecoin miner for storage, the data needs to be correctly formatted and packed. This can be achieved by locally importing the data into Lotus with:
+
+```sh
+lotus client import ./your-example-file.txt
+```
+
+Upon success, this command will return a **Data CID**. This is a very important piece of information, as it will be used to make deals to both store and retrieve the data in the future.
+
+You can list the data CIDs of the files you locally imported with:
+
+```sh
+lotus client local
+```
+
+## Storing data in the network
+
+To store data in the network you will need to:
+
+* Find a Filecoin miner willing to store it
+* Make a deal with the miner, agreeing on the price to pay and the duration for which the data should be stored.
+
+You can obtain a list of all miners in the network with:
+
+```sh
+lotus state list-miners
+t0xxxx
+t0xxxy
+t0xxxz
+...
+```
+
+This will print a list of miner IDs. In order to ask for the terms offered by a particular miner, you can then run:
+
+```sh
+lotus client query-ask
+```
+
+If you are satisfied with the terms, you can proceed to propose a deal to the miner, using the **Data CID** that you obtained during the import step:
+
+
+```sh
+lotus client deal
+```
+
+This command will interactively ask you for the CID, miner ID and duration in days for the deal. You can also call it with arguments:
+
+```sh
+lotus client deal
+```
+
+where the `duration` is expressed in blocks (1 block is equivalent to 30s).
+
+## Checking the status of the deals
+
+You can list deals with:
+
+```sh
+lotus client list-deals
+```
+
+Among other things, this will give you information about the current state of your deals, whether they have been published on chain (by the miners) and whether the miners have been slashed for not honoring them.
+
+For a deal to succeed, the miner needs to be correctly configured and running, accept the deal and *seal* the file correctly. Otherwise, the deal will appear in an error state.
+
+You can make deals with multiple miners for the same data.
+
+Once a deal is successful and the data is *sealed*, it can be [retrieved](en+retrieving).
diff --git a/documentation/en/store/retrieve.md b/documentation/en/store/retrieve.md
new file mode 100644
index 000000000..1e8db65af
--- /dev/null
+++ b/documentation/en/store/retrieve.md
@@ -0,0 +1,27 @@
+# Retrieving Data
+
+Once data has been successfully [stored](en+making-deals) and sealed by a Filecoin miner, it can be retrieved.
+
+In order to do this, we will need to create a **retrieval deal**.
+
+## Finding data by CID
+
+In order to retrieve some data you will need the **Data CID** that was used to create the storage deal.
+
+You can find who is storing the data by running:
+
+```sh
+lotus client find
+```
+
+## Making a retrieval deal
+
+You can then make a retrieval deal with:
+
+```sh
+lotus client retrieve
+```
+
+This command takes other optional flags (check `--help`).
+
+If the outfile does not exist it will be created in the Lotus repository directory. This process may take 2 to 10 minutes.
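+For instance (a minimal sketch with a made-up data CID), the full flow described above looks like:
+
+```sh
+# The CID below is a placeholder; use the Data CID returned by `lotus client import`
+lotus client find bafk2bzaceexampledatacid
+lotus client retrieve bafk2bzaceexampledatacid ~/retrieved-copy.txt
+```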
diff --git a/documentation/en/storing-data-troubleshooting.md b/documentation/en/store/storage-troubleshooting.md
similarity index 51%
rename from documentation/en/storing-data-troubleshooting.md
rename to documentation/en/store/storage-troubleshooting.md
index c8a0254fa..7087ec3d0 100644
--- a/documentation/en/storing-data-troubleshooting.md
+++ b/documentation/en/store/storage-troubleshooting.md
@@ -2,11 +2,11 @@
 
 ## Error: Routing: not found
 
-```sh
+```
 WARN main lotus/main.go:72 routing: not found
 ```
 
-- This miner is offline.
+This error means that the miner is offline.
 
 ## Error: Failed to start deal
 
@@ -14,14 +14,17 @@ WARN main lotus/main.go:72 failed to start deal: computing commP failed: genera
 WARN main lotus/main.go:72 failed to start deal: computing commP failed: generating CommP: Piece must be at least 127 bytes
 ```
 
-- There is a minimum file size of 127 bytes.
+This error means that there is a minimum file size of 127 bytes.
 
 ## Error: 0kb file response during retrieval
 
-In order to retrieve a file, it must be sealed. Miners can check sealing progress with this command:
+This means that the file to be retrieved may not have been sealed yet and is thus not retrievable.
+
+Miners can check sealing progress with this command:
 
 ```sh
 lotus-miner sectors list
 ```
 
-When sealing is complete, `pSet: NO` will become `pSet: YES`. From now on the **Data CID** is [retrievable](https://lotu.sh/en+retrieving-data) from the **Lotus Miner**.
+When sealing is complete, `pSet: NO` will become `pSet: YES`.
+
diff --git a/documentation/en/store/store.md b/documentation/en/store/store.md
new file mode 100644
index 000000000..205bd0e23
--- /dev/null
+++ b/documentation/en/store/store.md
@@ -0,0 +1,11 @@
+# Storing and retrieving data
+
+Lotus enables you to store any data on the Filecoin network and retrieve it later. This is achieved by making *deals* with miners.
+
+A *storage deal* specifies that a miner should store certain data for a previously agreed period and price.
+
+Once a deal is made, the data is then sent to the miner, which regularly proves that it is storing it. If the miner fails to do so, it is penalized (slashed).
+
+The data can be retrieved with a *retrieval deal*.
+
+This section explains how to use Lotus to [store](en+making-deals) and [retrieve](en+retrieving) data from the Filecoin network.
diff --git a/documentation/en/storing-data.md b/documentation/en/storing-data.md
deleted file mode 100644
index 67d2b1533..000000000
--- a/documentation/en/storing-data.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Storing Data
-
-> There are recent bug reports with these instructions. If you happen to encounter any problems, please create a [GitHub issue](https://github.com/filecoin-project/lotus/issues/new) and a maintainer will address the problem as soon as they can.
-
-Here are instructions for how to store data on the **Lotus Testnet**.
-
-## Adding a file locally
-
-Adding a file locally allows you to make miner deals on the **Lotus Testnet**.
-
-```sh
-lotus client import ./your-example-file.txt
-```
-
-Upon success, this command will return a **Data CID**.
- -## List your local files - -The command to see a list of files by `CID`, `name`, `size` in bytes, and `status`: - -```sh -lotus client local -``` - -An example of the output: - -```sh -bafkreierupr5ioxn4obwly4i2a5cd2rwxqi6kwmcyyylifxjsmos7hrgpe Development/sample-1.txt 2332 ok -bafkreieuk7h4zs5alzpdyhlph4lxkefowvwdho3a3pml6j7dam5mipzaii Development/sample-2.txt 30618 ok -``` - -## Make a Miner Deal on Lotus Testnet - -Get a list of all miners that can store data: - -```sh -lotus state list-miners -``` - -Get the requirements of a miner you wish to store data with: - -```sh -lotus client query-ask -``` - -Store a **Data CID** with a miner: - -```sh -lotus client deal -``` - -Check the status of a deal: - -```sh -lotus client list-deals -``` - -- The `duration`, which represents how long the miner will keep your file hosted, is represented in blocks. Each block represents 25 seconds. - -Upon success, this command will return a **Deal CID**. - -The miner will need to **seal** the file before it can be retrieved. If the **Lotus Miner** is not running on a machine designed for sealing, the process will take a very long time. diff --git a/documentation/en/dev/WIP-arch-complementary-notes.md b/documentation/en/unclassified/WIP-arch-complementary-notes.md similarity index 100% rename from documentation/en/dev/WIP-arch-complementary-notes.md rename to documentation/en/unclassified/WIP-arch-complementary-notes.md diff --git a/documentation/en/block-validation.md b/documentation/en/unclassified/block-validation.md similarity index 100% rename from documentation/en/block-validation.md rename to documentation/en/unclassified/block-validation.md diff --git a/documentation/en/dev/create-miner.md b/documentation/en/unclassified/create-miner.md similarity index 100% rename from documentation/en/dev/create-miner.md rename to documentation/en/unclassified/create-miner.md diff --git a/documentation/en/dev-tools-pond-ui.md b/documentation/en/unclassified/dev-tools-pond-ui.md similarity index 100% rename from documentation/en/dev-tools-pond-ui.md rename to documentation/en/unclassified/dev-tools-pond-ui.md diff --git a/documentation/en/sealing-procs.md b/documentation/en/unclassified/sealing-procs.md similarity index 100% rename from documentation/en/sealing-procs.md rename to documentation/en/unclassified/sealing-procs.md diff --git a/documentation/en/updating-lotus.md b/documentation/en/updating-lotus.md deleted file mode 100644 index 862cea136..000000000 --- a/documentation/en/updating-lotus.md +++ /dev/null @@ -1,14 +0,0 @@ -# Updating Lotus - -If you installed Lotus on your machine, you can upgrade to the latest version by doing the following: - -```sh -# get the latest -git pull origin master - -# clean and remake the binaries -make clean && make build - -# instal binaries in correct location -make install # or sudo make install if necessary -``` From a153e1d5862b67a031fe7ee7306a7e430c517187 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 31 Aug 2020 21:18:50 +0200 Subject: [PATCH 04/88] Fix #2334: Specify seal options to disable with co-located storage worker --- documentation/en/mining/lotus-seal-worker.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/documentation/en/mining/lotus-seal-worker.md b/documentation/en/mining/lotus-seal-worker.md index 62dde9bfb..47e201ca5 100644 --- a/documentation/en/mining/lotus-seal-worker.md +++ b/documentation/en/mining/lotus-seal-worker.md @@ -82,7 +82,15 @@ Worker 1, host othercomputer You can also run the **Lotus Worker** on 
the same machine as your **Lotus Miner**, so you can manually manage the process priority. -To do so you have to first __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes. +To do so you have to first __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes: + +```toml +[Storage] + AllowPreCommit1 = false + AllowPreCommit2 = false + AllowCommit = false + AllowUnseal = false +``` You can then run the miner on your local-loopback interface; From af38c902f88d5c7be13f24eca4e8d19b395e963f Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 1 Sep 2020 15:12:07 +0200 Subject: [PATCH 05/88] Fix architecture entry --- documentation/en/.library.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/en/.library.json b/documentation/en/.library.json index 87c7353c1..59cc01e29 100644 --- a/documentation/en/.library.json +++ b/documentation/en/.library.json @@ -193,7 +193,7 @@ { "title": "Lotus Architecture (WIP)", "slug": "en+arch", - "github": "en/architectiure/architecture.md", + "github": "en/architecture/architecture.md", "value": null, "posts": [ { From bd0c6a4cccd5a5e21ae3f4939cf9d8822ea9ca37 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 3 Sep 2020 16:35:55 +0200 Subject: [PATCH 06/88] Fix filename --- documentation/en/building/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/en/building/api.md b/documentation/en/building/api.md index 626193ee2..3a2c2902b 100644 --- a/documentation/en/building/api.md +++ b/documentation/en/building/api.md @@ -19,7 +19,7 @@ Lotus uses its own Go library implementation of [JSON-RPC](https://github.com/fi ## cURL example -To demonstrate making an API request, we will take the method `ChainHead` from [api/api.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go). +To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go). 
```go ChainHead(context.Context) (*types.TipSet, error) From e53aee26a3d1b440729212fde7c77b8c06bf9a14 Mon Sep 17 00:00:00 2001 From: austinabell Date: Tue, 15 Sep 2020 18:36:06 -0400 Subject: [PATCH 07/88] Make state transition in validation async --- chain/sync.go | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/chain/sync.go b/chain/sync.go index f7530f556..e23284134 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -788,31 +788,35 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er b.Header.ParentWeight, pweight) } - // Stuff that needs stateroot / worker address - stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs) - if err != nil { - return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) - } - - if stateroot != h.ParentStateRoot { - msgs, err := syncer.store.MessagesForTipset(baseTs) + stateRootCheck := async.Err(func() error { + stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs) if err != nil { - log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) - } else { - log.Warn("Messages for tipset with mismatching state:") - for i, m := range msgs { - mm := m.VMMessage() - log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) - } + return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) } - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) - } + if stateroot != h.ParentStateRoot { + msgs, err := syncer.store.MessagesForTipset(baseTs) + if err != nil { + log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) + } else { + log.Warn("Messages for tipset with mismatching state:") + for i, m := range msgs { + mm := m.VMMessage() + log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) + } + } - if precp != h.ParentMessageReceipts { - return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) - } + return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) + } + if precp != h.ParentMessageReceipts { + return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) + } + + return nil + }) + + // Stuff that needs worker address waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner) if err != nil { return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err) @@ -933,6 +937,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er winnerCheck, msgsCheck, baseFeeCheck, + stateRootCheck, } var merr error From a4fd356fcbc20f38657c128ceb291c2db42f150a Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 17 Sep 2020 20:35:19 +0200 Subject: [PATCH 08/88] Delete most docs. 
Update about page with links to docs.filecoin.io --- documentation/en/.glossary.json | 147 +- documentation/en/.library.json | 202 +- documentation/en/about.md | 19 +- documentation/en/building/api-methods.md | 4567 ----------------- .../en/building/api-troubleshooting.md | 36 - documentation/en/building/api.md | 38 - documentation/en/building/building.md | 5 - documentation/en/building/jaeger-tracing.md | 26 - documentation/en/building/local-devnet.md | 54 - documentation/en/building/payment-channels.md | 111 - documentation/en/building/remote-api.md | 69 - .../en/getting-started/getting-started.md | 3 - .../getting-started/setup-troubleshooting.md | 57 - documentation/en/getting-started/setup.md | 169 - documentation/en/getting-started/wallet.md | 58 - .../en/installation/install-linux.md | 129 - .../en/installation/install-macos.md | 62 - documentation/en/installation/installation.md | 39 - documentation/en/installation/update.md | 72 - documentation/en/mining/gpus.md | 17 - documentation/en/mining/lotus-seal-worker.md | 99 - documentation/en/mining/managing-deals.md | 19 - documentation/en/mining/miner-setup.md | 241 - .../en/mining/mining-troubleshooting.md | 59 - documentation/en/mining/mining.md | 8 - documentation/en/store/adding-from-ipfs.md | 20 - documentation/en/store/making-deals.md | 71 - documentation/en/store/retrieve.md | 27 - .../en/store/storage-troubleshooting.md | 30 - documentation/en/store/store.md | 11 - 30 files changed, 14 insertions(+), 6451 deletions(-) delete mode 100644 documentation/en/building/api-methods.md delete mode 100644 documentation/en/building/api-troubleshooting.md delete mode 100644 documentation/en/building/api.md delete mode 100644 documentation/en/building/building.md delete mode 100644 documentation/en/building/jaeger-tracing.md delete mode 100644 documentation/en/building/local-devnet.md delete mode 100644 documentation/en/building/payment-channels.md delete mode 100644 documentation/en/building/remote-api.md delete mode 100644 documentation/en/getting-started/getting-started.md delete mode 100644 documentation/en/getting-started/setup-troubleshooting.md delete mode 100644 documentation/en/getting-started/setup.md delete mode 100644 documentation/en/getting-started/wallet.md delete mode 100644 documentation/en/installation/install-linux.md delete mode 100644 documentation/en/installation/install-macos.md delete mode 100644 documentation/en/installation/installation.md delete mode 100644 documentation/en/installation/update.md delete mode 100644 documentation/en/mining/gpus.md delete mode 100644 documentation/en/mining/lotus-seal-worker.md delete mode 100644 documentation/en/mining/managing-deals.md delete mode 100644 documentation/en/mining/miner-setup.md delete mode 100644 documentation/en/mining/mining-troubleshooting.md delete mode 100644 documentation/en/mining/mining.md delete mode 100644 documentation/en/store/adding-from-ipfs.md delete mode 100644 documentation/en/store/making-deals.md delete mode 100644 documentation/en/store/retrieve.md delete mode 100644 documentation/en/store/storage-troubleshooting.md delete mode 100644 documentation/en/store/store.md diff --git a/documentation/en/.glossary.json b/documentation/en/.glossary.json index e8a9e0846..0967ef424 100644 --- a/documentation/en/.glossary.json +++ b/documentation/en/.glossary.json @@ -1,146 +1 @@ -{ - "bellman": { - "title": "Bellman", - "value": "Bellman is a rust crate for building zk-SNARK circuits. 
It provides circuit traits and primitive structures, as well as basic gadget implementations such as booleans and number abstractions." - }, - "nvme": { - "title": "NVMe", - "value": "(non-volatile memory express) is a host controller interface and storage protocol created to accelerate the transfer of data between enterprise and client systems and solid-state drives (SSDs) over a computer's high-speed Peripheral Component Interconnect Express (PCIe) bus." - }, - "multiaddr": { - "title": "Multiaddr", - "value": "Multiaddr is a format for encoding addresses from various well-established network protocols. It is useful to write applications that future-proof their use of addresses, and allow multiple transport protocols and addresses to coexist." - }, - "attofil": { - "title": "attoFIL", - "value": "AttoFIL is a word used to describe 10^-18 FIL. The word atto comes from the Norwegian and Danish term: atten eighteen." - }, - "fil": { - "title": "FIL", - "value": "A ticker symbol is an abbreviation used to uniquely identify Filecoin when it is used in a wallet exchange or a cryptocurrency exchange." - }, - "epost": { - "title": "Election Proof-of-Spacetime", - "value": "Election Proof-of-Spacetime couples the Proof-of-Spacetime process with block production, meaning that in order to produce a block, the miner must produce a valid Proof-of-Spacetime proof (snark output)." - }, - "jwt": { - "title": "JWT", - "value": "JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties." - }, - "json-rpc": { - "title": "JSON-RPC", - "value": "JSON-RPC is a remote procedure call protocol encoded in JSON. It is a very simple protocol (and very similar to XML-RPC), defining only a few data types and commands." - }, - "bls-address": { - "title": "BLS Signature (Address)", - "value": "A Boneh–Lynn–Shacham (BLS) signature is a digital signature scheme that allows a user to determine the authenticity of a signer, and is a commonly used signature scheme in the Filecoin Distributed Storage Network." - }, - "faucet": { - "title": "Filecoin Test Faucet", - "value": "A webpage where you can get free test Filecoin to participate in the Testnet." - }, - "chain": { - "title": "Chain", - "value": "The Filecoin Blockchain is a distributed virtual machine that achieves consensus, processes messages, accounts for storage, and maintains security in the Filecoin Protocol. It is the main interface linking various actors in the Filecoin system." - }, - "miner-power": { - "title": "Miner Power", - "value": "Miner storage in relation to network storage, tracked in the power table." - }, - "sector": { - "title": "Sector", - "value": "A fixed-size block of data of SECTOR_SIZE bytes which generally contains client's data." - }, - "sealing": { - "title": "Sealing", - "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector." - }, - "seal": { - "title": "Seal", - "value": "A slow encoding process that returns commitments and proofs for data being stored in a sector." - }, - "posts": { - "title": "Proof-of-Spacetime(s)", - "value": "Filecoin is a protocol token whose blockchain runs on a novel proof, called Proof-of-Spacetime, where blocks are created by miners that are storing data." - }, - "filecoin-testnet": { - "title": "Filecoin Testnet", - "value": "Until we launch, we are making lots of changes to Lotus. The Testnet is expected to bring a few significant fixes/improvements. 
During Testnet, you can retrieve test filecoin from our network faucet to use as collateral to start mining. Test filecoin do not have any value – the official filecoin tokens will not be released until Mainnet launch." - }, - "filecoin-decentralized-storage-market": { - "title": "Filecoin Decentralized Storage Market", - "value": "Storage Market subsystem is the data entry point into the network. Miners only earn power from data stored in a storage deal and all deals live on the Filecoin network." - }, - "filecoin-proof-parameters": { - "title": "Filecoin Proof Parameters", - "value": "The proving algorithms rely on a large binary parameter file." - }, - "lotus-devnet": { - "title": "DevNet", - "value": "On the DevNets, you can store data as a storage client and also try how Filecoin mining works. The devnets are an important development tool for those who anticipate building applications on top of the Filecoin protocol or storing data on the decentralized storage market. " - }, - "filecoin-distributed-storage-network": { - "title": "Filecoin Distributed Storage Network", - "value": "Filecoin is a distributed storage network based on a blockchain mechanism. Filecoin miners can elect to provide storage capacity for the network, and thereby earn units of the Filecoin cryptocurrency (FIL) by periodically producing cryptographic proofs that certify that they are providing the capacity specified." - }, - "lotus-node": { - "title": "Lotus Node", - "value": "The Lotus Node is full of capabilities. It runs the Blockchain system, makes retrieval deals, does data transfer, supports block producer logic, and syncs and validates the chain." - }, - "block-rewards": { - "title": "Block Reward", - "value": "Over the entire lifetime of the protocol, 1,400,000,000 FIL (TotalIssuance) will be given out to miners. The rate at which the funds are given out is set to halve every six years, smoothly (not a fixed jump like in Bitcoin)." - }, - "block-producer-miner": { - "title": "Miner (Block Producer)", - "value": "The Block Producer Miner's logic. It currently shares an interface and process with the Lotus Node. A Block Producer chooses which messages to include in a block and is rewarded according to each message’s gas price and consumption, forming a market." - }, - "lotus-miner": { - "title": "Miner (lotus-miner)", - "value": "The Miner's logic. It has its own dedicated process. Contributes to the network through Sector commitments and Proofs of Spacetime to prove that it is storing the sectors it has commited to." - }, - "swarm-port": { - "title": "Swarm Port (Libp2p)", - "value": "The LibP2P Swarm manages groups of connections to peers, handles incoming and outgoing streams, and is part of the miners implementation. The port value is part of the Host interface." - }, - "daemon": { - "title": "Lotus Daemon", - "value": "A Daemon is a program that runs as a background process. A Daemon in the context of the Filecoin Distributed Storage Network may enable applications to communicate with peers, handle protocols, participate in pubsub, and interact with a distributed hash table (DHT)." - }, - "storage-deal": { - "title": "Storage deal", - "value": "One of the two types of deals in Filecoin markets. Storage deals are recorded on the blockchain and enforced by the protocol." - }, - "retrieval-deal": { - "title": "Retrieval deal", - "value": "One of the two types of deals in Filecoin markets. Retrieval deals are off chain and enabled by micropayment channel by transacting parties." 
- }, - "deal-cid": { - "title": "Deal CID", - "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DealCID specifically is used in storage deals." - }, - "data-cid": { - "title": "Data CID", - "value": "CID is a format for referencing content in distributed information systems, it is a way to store information so it can be retrieved based on its content, not its location. DataCID specifically is used to represent the file that is stored in the Filecoin Distributed Storage Network." - }, - "cid": { - "title": "CID", - "value": "A CID is a self-describing content-addressed identifier. It uses cryptographic hashes to achieve content addressing. It uses several multiformats to achieve flexible self-description, namely multihash for hashes, multicodec for data content types, and multibase to encode the CID itself into strings." - }, - "total-network-power": { - "title": "Total Network Power", - "value": "A reference to all the Power Tables for every subchain, accounting for each Lotus Miner on chain." - }, - "chain-block-height": { - "title": "Chain Block Height", - "value": "Chain block height is defined as the number of blocks in the chain between any given block and the very first block in the blockchain." - }, - "block-height": { - "title": "Block Height", - "value": "Height of the Merkle Tree of a sector. A sector is a contiguous array of bytes that a miner puts together, seals, and performs Proofs of Spacetime on." - }, - "blocktime": { - "title": "Blocktime", - "value": "The time it takes for a Block to propagate to the whole network." - } -} +{} diff --git a/documentation/en/.library.json b/documentation/en/.library.json index 59cc01e29..e31f09950 100644 --- a/documentation/en/.library.json +++ b/documentation/en/.library.json @@ -2,194 +2,11 @@ "posts": [ { "title": "About Lotus", - "slug": "en+lotus", + "slug": "", "github": "en/about.md", "value": null, "posts": [] }, - { - "title": "Installation", - "slug": "en+install", - "github": "en/installation/installation.md", - "value": null, - "posts": [ - { - "title": "Linux installation", - "slug": "en+install-linux", - "github": "en/installation/install-linux.md", - "value": null - }, - { - "title": "MacOS installation", - "slug": "en+install-macos", - "github": "en/installation/install-macos.md", - "value": null - }, - { - "title": "Updating Lotus", - "slug": "en+update", - "github": "en/installation/update.md", - "value": null - } - ] - }, - { - "title": "Getting started", - "slug": "en+getting-started", - "github": "en/getting-started/getting-started.md", - "value": null, - "posts": [ - { - "title": "Setting up Lotus", - "slug": "en+setup", - "github": "en/getting-started/setup.md", - "value": null - }, - { - - "title": "Obtaining and sending FIL", - "slug": "en+wallet", - "github": "en/getting-started/wallet.md", - "value": null - }, - { - "title": "Setup troubleshooting", - "slug": "en+setup-troubleshooting", - "github": "en/getting-started/setup-troubleshooting.md", - "value": null - } - ] - }, - { - "title": "Storing and retrieving data", - "slug": "en+store", - "github": "en/store/store.md", - "value": null, - "posts": [ - { - "title": "Making storage deals", - "slug": "en+making-deals", - "github": "en/store/making-deals.md", - "value": null - }, - { - "title": "Adding data from IPFS", - "slug": "en+adding-from-ipfs", - "github": "en/store/adding-from-ipfs.md", - "value": null - }, - { - "title": 
"Retrieving data", - "slug": "en+retriving", - "github": "en/store/retrieve.md", - "value": null - }, - { - "title": "Storage Troubleshooting", - "slug": "en+storage-troubleshooting", - "github": "en/store/storage-troubleshooting.md", - "value": null - } - ] - }, - { - "title": "Storage mining", - "slug": "en+mining", - "github": "en/mining/mining.md", - "value": null, - "posts": [ - { - "title": "Miner setup", - "slug": "en+miner-setup", - "github": "en/mining/miner-setup.md", - "value": null - }, - { - "title": "Managing deals", - "slug": "en+managing-deals", - "github": "en/mining/managing-deals.md", - "value": null - }, - { - "title": "Lotus Worker", - "slug": "en+lotus-worker", - "github": "en/mining/lotus-seal-worker.md", - "value": null - }, - { - "title": "Benchmarking GPUs", - "slug": "en+gpus", - "github": "en/mining/gpus.md", - "value": null - }, - { - "title": "Mining Troubleshooting", - "slug": "en+mining-troubleshooting", - "github": "en/mining/mining-troubleshooting.md", - "value": null - } - ] - }, - { - "title": "Building", - "slug": "en+building", - "github": "en/building/building.md", - "value": null, - "posts": [ - { - "title": "Setting up remote API access", - "slug": "en+remote-api", - "github": "en/building/remote-api.md", - "value": null, - "posts": [] - }, - { - "title": "API endpoints and methods", - "slug": "en+api", - "github": "en/building/api.md", - "value": null, - "posts": [] - }, - { - "title": "API Reference", - "slug": "en+api-methods", - "github": "en/building/api-methods.md", - "value": null, - "posts": [] - }, - - { - "title": "Payment Channels", - "slug": "en+payment-channels", - "github": "en/building/payment-channels.md", - "value": null, - "posts": [] - }, - - { - "title": "Running a local devnet", - "slug": "en+local-devnet", - "github": "en/building/local-devnet.md", - "value": null, - "posts": [] - }, - { - "title": "Jaeger Tracing", - "slug": "en+jaeger-tracing", - "github": "en/building/jaeger-tracing.md", - "value": null, - "posts": [] - }, - - { - "title": "API Troubleshooting", - "slug": "en+api-troubleshooting", - "github": "en/building/api-troubleshooting.md", - "value": null, - "posts": [] - } - ] - }, { "title": "Lotus Architecture (WIP)", "slug": "en+arch", @@ -203,23 +20,6 @@ "value": null } ] - }, - { - "title": "FAQs", - "slug": "en+faqs", - "github": "en/faqs.md", - "value": null, - "posts": [] - }, - { - "title": "Glossary", - "slug": "en+glossary", - "github": "en/.glossary.json", - "value": null, - "custom": { - "glossary": true - }, - "posts": [] } ] } diff --git a/documentation/en/about.md b/documentation/en/about.md index ee8536ac9..f2051e00b 100644 --- a/documentation/en/about.md +++ b/documentation/en/about.md @@ -2,13 +2,18 @@ Lotus is an implementation of the **Filecoin Distributed Storage Network**. -The **Lotus Node** (and the mining applications) can be built to join any of the [Filecoin networks](https://docs.filecoin.io/how-to/networks/). +It is written in Go and provides a suite of command-line applications: -For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://filecoin-project.github.io/specs/). +- Lotus Node (`lotus`): a Filecoin Node: validates network transactions, manages a FIL wallet, can perform storage and retrieval deals. +- Lotus Miner (`lotus-miner`): a Filecoin miner. See the the respective Lotus Miner section in the Mine documentation. +- Lotus Worker (`lotus-worker`): a worker that assists miners to perform mining-related tasks. 
See its respective guide for more information. -## What can I learn here? +The [Lotus user documentation](https://docs.filecoin.io/get-started/lotus) is part of the [Filecoin documentation site](https://docs.filecoin.io): + +* To install and get started with Lotus, visit the [Get Started section](https://docs.filecoin.io/get-started/lotus). +* Information about how to perform deals on the Filecoin network using Lotus can be found in the [Store section](https://docs.filecoin.io/store/lotus). +* Miners looking to provide storage to the Network can find the latest guides in the [Mine section](https://docs.filecoin.io/mine/lotus). +* Developers and integrators that wish to use the Lotus APIs can start in the [Build section](https://docs.filecoin.io/mine/lotus). + +For more details about Filecoin, check out the [Filecoin Docs](https://docs.filecoin.io) and [Filecoin Spec](https://spec.filecoin.io/). -* How to [install](en+installation) and [setup](en+setup) the Lotus software -* How to [store data on the Filecoin network](en+store) -* How to [setup a high performance FIL miner](en+miner-setup) -* How to [configure and access Lotus APIs](en+remote-api) diff --git a/documentation/en/building/api-methods.md b/documentation/en/building/api-methods.md deleted file mode 100644 index 2f3164bb7..000000000 --- a/documentation/en/building/api-methods.md +++ /dev/null @@ -1,4567 +0,0 @@ -# Groups -* [](#) - * [Closing](#Closing) - * [Shutdown](#Shutdown) - * [Version](#Version) -* [Auth](#Auth) - * [AuthNew](#AuthNew) - * [AuthVerify](#AuthVerify) -* [Beacon](#Beacon) - * [BeaconGetEntry](#BeaconGetEntry) -* [Chain](#Chain) - * [ChainExport](#ChainExport) - * [ChainGetBlock](#ChainGetBlock) - * [ChainGetBlockMessages](#ChainGetBlockMessages) - * [ChainGetGenesis](#ChainGetGenesis) - * [ChainGetMessage](#ChainGetMessage) - * [ChainGetNode](#ChainGetNode) - * [ChainGetParentMessages](#ChainGetParentMessages) - * [ChainGetParentReceipts](#ChainGetParentReceipts) - * [ChainGetPath](#ChainGetPath) - * [ChainGetRandomnessFromBeacon](#ChainGetRandomnessFromBeacon) - * [ChainGetRandomnessFromTickets](#ChainGetRandomnessFromTickets) - * [ChainGetTipSet](#ChainGetTipSet) - * [ChainGetTipSetByHeight](#ChainGetTipSetByHeight) - * [ChainHasObj](#ChainHasObj) - * [ChainHead](#ChainHead) - * [ChainNotify](#ChainNotify) - * [ChainReadObj](#ChainReadObj) - * [ChainSetHead](#ChainSetHead) - * [ChainStatObj](#ChainStatObj) - * [ChainTipSetWeight](#ChainTipSetWeight) -* [Client](#Client) - * [ClientCalcCommP](#ClientCalcCommP) - * [ClientDataTransferUpdates](#ClientDataTransferUpdates) - * [ClientDealSize](#ClientDealSize) - * [ClientFindData](#ClientFindData) - * [ClientGenCar](#ClientGenCar) - * [ClientGetDealInfo](#ClientGetDealInfo) - * [ClientGetDealUpdates](#ClientGetDealUpdates) - * [ClientHasLocal](#ClientHasLocal) - * [ClientImport](#ClientImport) - * [ClientListDataTransfers](#ClientListDataTransfers) - * [ClientListDeals](#ClientListDeals) - * [ClientListImports](#ClientListImports) - * [ClientMinerQueryOffer](#ClientMinerQueryOffer) - * [ClientQueryAsk](#ClientQueryAsk) - * [ClientRemoveImport](#ClientRemoveImport) - * [ClientRetrieve](#ClientRetrieve) - * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) - * [ClientStartDeal](#ClientStartDeal) -* [Gas](#Gas) - * [GasEstimateFeeCap](#GasEstimateFeeCap) - * [GasEstimateGasLimit](#GasEstimateGasLimit) - * [GasEstimateGasPremium](#GasEstimateGasPremium) - * 
[GasEstimateMessageGas](#GasEstimateMessageGas) -* [I](#I) - * [ID](#ID) -* [Log](#Log) - * [LogList](#LogList) - * [LogSetLevel](#LogSetLevel) -* [Market](#Market) - * [MarketEnsureAvailable](#MarketEnsureAvailable) -* [Miner](#Miner) - * [MinerCreateBlock](#MinerCreateBlock) - * [MinerGetBaseInfo](#MinerGetBaseInfo) -* [Mpool](#Mpool) - * [MpoolClear](#MpoolClear) - * [MpoolGetConfig](#MpoolGetConfig) - * [MpoolGetNonce](#MpoolGetNonce) - * [MpoolPending](#MpoolPending) - * [MpoolPush](#MpoolPush) - * [MpoolPushMessage](#MpoolPushMessage) - * [MpoolSelect](#MpoolSelect) - * [MpoolSetConfig](#MpoolSetConfig) - * [MpoolSub](#MpoolSub) -* [Msig](#Msig) - * [MsigAddApprove](#MsigAddApprove) - * [MsigAddCancel](#MsigAddCancel) - * [MsigAddPropose](#MsigAddPropose) - * [MsigApprove](#MsigApprove) - * [MsigCancel](#MsigCancel) - * [MsigCreate](#MsigCreate) - * [MsigGetAvailableBalance](#MsigGetAvailableBalance) - * [MsigGetVested](#MsigGetVested) - * [MsigPropose](#MsigPropose) - * [MsigSwapApprove](#MsigSwapApprove) - * [MsigSwapCancel](#MsigSwapCancel) - * [MsigSwapPropose](#MsigSwapPropose) -* [Net](#Net) - * [NetAddrsListen](#NetAddrsListen) - * [NetAgentVersion](#NetAgentVersion) - * [NetAutoNatStatus](#NetAutoNatStatus) - * [NetBandwidthStats](#NetBandwidthStats) - * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) - * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) - * [NetConnect](#NetConnect) - * [NetConnectedness](#NetConnectedness) - * [NetDisconnect](#NetDisconnect) - * [NetFindPeer](#NetFindPeer) - * [NetPeers](#NetPeers) - * [NetPubsubScores](#NetPubsubScores) -* [Paych](#Paych) - * [PaychAllocateLane](#PaychAllocateLane) - * [PaychAvailableFunds](#PaychAvailableFunds) - * [PaychAvailableFundsByFromTo](#PaychAvailableFundsByFromTo) - * [PaychCollect](#PaychCollect) - * [PaychGet](#PaychGet) - * [PaychGetWaitReady](#PaychGetWaitReady) - * [PaychList](#PaychList) - * [PaychNewPayment](#PaychNewPayment) - * [PaychSettle](#PaychSettle) - * [PaychStatus](#PaychStatus) - * [PaychVoucherAdd](#PaychVoucherAdd) - * [PaychVoucherCheckSpendable](#PaychVoucherCheckSpendable) - * [PaychVoucherCheckValid](#PaychVoucherCheckValid) - * [PaychVoucherCreate](#PaychVoucherCreate) - * [PaychVoucherList](#PaychVoucherList) - * [PaychVoucherSubmit](#PaychVoucherSubmit) -* [State](#State) - * [StateAccountKey](#StateAccountKey) - * [StateAllMinerFaults](#StateAllMinerFaults) - * [StateCall](#StateCall) - * [StateChangedActors](#StateChangedActors) - * [StateCirculatingSupply](#StateCirculatingSupply) - * [StateCompute](#StateCompute) - * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) - * [StateGetActor](#StateGetActor) - * [StateGetReceipt](#StateGetReceipt) - * [StateListActors](#StateListActors) - * [StateListMessages](#StateListMessages) - * [StateListMiners](#StateListMiners) - * [StateLookupID](#StateLookupID) - * [StateMarketBalance](#StateMarketBalance) - * [StateMarketDeals](#StateMarketDeals) - * [StateMarketParticipants](#StateMarketParticipants) - * [StateMarketStorageDeal](#StateMarketStorageDeal) - * [StateMinerActiveSectors](#StateMinerActiveSectors) - * [StateMinerAvailableBalance](#StateMinerAvailableBalance) - * [StateMinerDeadlines](#StateMinerDeadlines) - * [StateMinerFaults](#StateMinerFaults) - * [StateMinerInfo](#StateMinerInfo) - * [StateMinerInitialPledgeCollateral](#StateMinerInitialPledgeCollateral) - * [StateMinerPartitions](#StateMinerPartitions) - * [StateMinerPower](#StateMinerPower) - * 
[StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower) - * [StateMinerProvingDeadline](#StateMinerProvingDeadline) - * [StateMinerRecoveries](#StateMinerRecoveries) - * [StateMinerSectorCount](#StateMinerSectorCount) - * [StateMinerSectors](#StateMinerSectors) - * [StateMsgGasCost](#StateMsgGasCost) - * [StateNetworkName](#StateNetworkName) - * [StateReadState](#StateReadState) - * [StateReplay](#StateReplay) - * [StateSearchMsg](#StateSearchMsg) - * [StateSectorExpiration](#StateSectorExpiration) - * [StateSectorGetInfo](#StateSectorGetInfo) - * [StateSectorPartition](#StateSectorPartition) - * [StateSectorPreCommitInfo](#StateSectorPreCommitInfo) - * [StateVerifiedClientStatus](#StateVerifiedClientStatus) - * [StateWaitMsg](#StateWaitMsg) -* [Sync](#Sync) - * [SyncCheckBad](#SyncCheckBad) - * [SyncCheckpoint](#SyncCheckpoint) - * [SyncIncomingBlocks](#SyncIncomingBlocks) - * [SyncMarkBad](#SyncMarkBad) - * [SyncState](#SyncState) - * [SyncSubmitBlock](#SyncSubmitBlock) - * [SyncUnmarkBad](#SyncUnmarkBad) -* [Wallet](#Wallet) - * [WalletBalance](#WalletBalance) - * [WalletDefaultAddress](#WalletDefaultAddress) - * [WalletDelete](#WalletDelete) - * [WalletExport](#WalletExport) - * [WalletHas](#WalletHas) - * [WalletImport](#WalletImport) - * [WalletList](#WalletList) - * [WalletNew](#WalletNew) - * [WalletSetDefault](#WalletSetDefault) - * [WalletSign](#WalletSign) - * [WalletSignMessage](#WalletSignMessage) - * [WalletVerify](#WalletVerify) -## - - -### Closing - - -Perms: read - -Inputs: `null` - -Response: `{}` - -### Shutdown - - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### Version - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Version": "string value", - "APIVersion": 3584, - "BlockDelay": 42 -} -``` - -## Auth - - -### AuthNew - - -Perms: admin - -Inputs: -```json -[ - null -] -``` - -Response: `"Ynl0ZSBhcnJheQ=="` - -### AuthVerify - - -Perms: read - -Inputs: -```json -[ - "string value" -] -``` - -Response: `null` - -## Beacon -The Beacon method group contains methods for interacting with the random beacon (DRAND) - - -### BeaconGetEntry -BeaconGetEntry returns the beacon entry for the given filecoin epoch. If -the entry has not yet been produced, the call will block until the entry -becomes available - - -Perms: read - -Inputs: -```json -[ - 10101 -] -``` - -Response: -```json -{ - "Round": 42, - "Data": "Ynl0ZSBhcnJheQ==" -} -``` - -## Chain -The Chain method group contains methods for interacting with the -blockchain, but that do not require any form of state computation. - - -### ChainExport -ChainExport returns a stream of bytes with CAR dump of chain data. -The exported chain data includes the header chain from the given tipset -back to genesis, the entire genesis state, and the most recent 'nroots' -state trees. -If oldmsgskip is set, messages from before the requested roots are also not included. - - -Perms: read - -Inputs: -```json -[ - 10101, - true, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"Ynl0ZSBhcnJheQ=="` - -### ChainGetBlock -ChainGetBlock returns the block specified by the given CID. 
- - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Miner": "t01234", - "Ticket": { - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "ElectionProof": { - "WinCount": 9, - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, - "ParentWeight": "0", - "Height": 10101, - "ParentStateRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ParentMessageReceipts": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Messages": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "BLSAggregate": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Timestamp": 42, - "BlockSig": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ForkSignaling": 42, - "ParentBaseFee": "0" -} -``` - -### ChainGetBlockMessages -ChainGetBlockMessages returns messages stored in the specified block. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "BlsMessages": null, - "SecpkMessages": null, - "Cids": null -} -``` - -### ChainGetGenesis -ChainGetGenesis returns the genesis tipset. - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Cids": null, - "Blocks": null, - "Height": 0 -} -``` - -### ChainGetMessage -ChainGetMessage reads a message referenced by the specified CID from the -chain blockstore. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" -} -``` - -### ChainGetNode -There are not yet any comments for this method. - -Perms: read - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "Cid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Obj": {} -} -``` - -### ChainGetParentMessages -ChainGetParentMessages returns messages stored in parent tipset of the -specified block. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `null` - -### ChainGetParentReceipts -ChainGetParentReceipts returns receipts for messages in parent tipset of -the specified block. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `null` - -### ChainGetPath -ChainGetPath returns a set of revert/apply operations needed to get from -one tipset to another, for example: -``` - to - ^ -from tAA - ^ ^ -tBA tAB - ^---*--^ - ^ - tRR -``` -Would return `[revert(tBA), apply(tAB), apply(tAA)]` - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### ChainGetRandomnessFromBeacon -ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. 
- - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - 2, - 10101, - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: `null` - -### ChainGetRandomnessFromTickets -ChainGetRandomnessFromTickets is used to sample the chain for randomness. - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - 2, - 10101, - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: `null` - -### ChainGetTipSet -ChainGetTipSet returns the tipset specified by the given TipSetKey. - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Cids": null, - "Blocks": null, - "Height": 0 -} -``` - -### ChainGetTipSetByHeight -ChainGetTipSetByHeight looks back for a tipset at the specified epoch. -If there are no blocks at the specified epoch, a tipset at an earlier epoch -will be returned. - - -Perms: read - -Inputs: -```json -[ - 10101, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Cids": null, - "Blocks": null, - "Height": 0 -} -``` - -### ChainHasObj -ChainHasObj checks if a given CID exists in the chain blockstore. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `true` - -### ChainHead -ChainHead returns the current head of the chain. - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Cids": null, - "Blocks": null, - "Height": 0 -} -``` - -### ChainNotify -ChainNotify returns channel with chain head updates. -First message is guaranteed to be of len == 1, and type == 'current'. - - -Perms: read - -Inputs: `null` - -Response: `null` - -### ChainReadObj -ChainReadObj reads ipld nodes referenced by the specified CID from chain -blockstore and returns raw bytes. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `"Ynl0ZSBhcnJheQ=="` - -### ChainSetHead -ChainSetHead forcefully sets current chain head. Use with caution. - - -Perms: admin - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `{}` - -### ChainStatObj -ChainStatObj returns statistics about the graph referenced by 'obj'. -If 'base' is also specified, then the returned stat will be a diff -between the two objects. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Size": 42, - "Links": 42 -} -``` - -### ChainTipSetWeight -ChainTipSetWeight computes weight for the specified tipset. 
- - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -## Client -The Client methods all have to do with interacting with the storage and -retrieval markets as a client - - -### ClientCalcCommP -ClientCalcCommP calculates the CommP for a specified file - - -Perms: read - -Inputs: -```json -[ - "string value" -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 1024 -} -``` - -### ClientDataTransferUpdates -There are not yet any comments for this method. - -Perms: write - -Inputs: `null` - -Response: -```json -{ - "TransferID": 3, - "Status": 1, - "BaseCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "IsInitiator": true, - "IsSender": true, - "Voucher": "string value", - "Message": "string value", - "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Transferred": 42 -} -``` - -### ClientDealSize -ClientDealSize calculates real deal data size - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "PayloadSize": 9, - "PieceSize": 1032 -} -``` - -### ClientFindData -ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - null -] -``` - -Response: `null` - -### ClientGenCar -ClientGenCar generates a CAR file for the specified file. - - -Perms: write - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - }, - "string value" -] -``` - -Response: `{}` - -### ClientGetDealInfo -ClientGetDealInfo returns the latest information about a given deal. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "Provider": "t01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": null, - "PieceSize": 1024 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z" -} -``` - -### ClientGetDealUpdates -ClientGetDealUpdates returns the status of updated deals - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "ProposalCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "State": 42, - "Message": "string value", - "Provider": "t01234", - "DataRef": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": null, - "PieceSize": 1024 - }, - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 42, - "PricePerEpoch": "0", - "Duration": 42, - "DealID": 5432, - "CreationTime": "0001-01-01T00:00:00Z" -} -``` - -### ClientHasLocal -ClientHasLocal indicates whether a certain CID is locally stored. 
- - -Perms: write - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `true` - -### ClientImport -ClientImport imports file under the specified path into filestore. - - -Perms: admin - -Inputs: -```json -[ - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ImportID": 50 -} -``` - -### ClientListDataTransfers -ClientListTransfers returns the status of all ongoing transfers of data - - -Perms: write - -Inputs: `null` - -Response: `null` - -### ClientListDeals -ClientListDeals returns information about the deals made by the local client. - - -Perms: write - -Inputs: `null` - -Response: `null` - -### ClientListImports -ClientListImports lists imported files and their root CIDs - - -Perms: write - -Inputs: `null` - -Response: `null` - -### ClientMinerQueryOffer -ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. - - -Perms: read - -Inputs: -```json -[ - "t01234", - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - null -] -``` - -Response: -```json -{ - "Err": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "Size": 42, - "MinPrice": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Miner": "t01234", - "MinerPeer": { - "Address": "t01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } -} -``` - -### ClientQueryAsk -ClientQueryAsk returns a signed StorageAsk from the specified miner. - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "t01234" -] -``` - -Response: -```json -{ - "Ask": { - "Price": "0", - "VerifiedPrice": "0", - "MinPieceSize": 1032, - "MaxPieceSize": 1032, - "Miner": "t01234", - "Timestamp": 10101, - "Expiry": 10101, - "SeqNo": 42 - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -} -``` - -### ClientRemoveImport -ClientRemoveImport removes file import - - -Perms: admin - -Inputs: -```json -[ - 50 -] -``` - -Response: `{}` - -### ClientRetrieve -ClientRetrieve initiates the retrieval of a file, as specified in the order. - - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "Size": 42, - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "t01234", - "Miner": "t01234", - "MinerPeer": { - "Address": "t01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } - }, - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: `{}` - -### ClientRetrieveTryRestartInsufficientFunds -ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel -which are stuck due to insufficient funds - - -Perms: write - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `{}` - -### ClientRetrieveWithEvents -ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel -of status updates. 
- - -Perms: admin - -Inputs: -```json -[ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "Size": 42, - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "t01234", - "Miner": "t01234", - "MinerPeer": { - "Address": "t01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } - }, - { - "Path": "string value", - "IsCAR": true - } -] -``` - -Response: -```json -{ - "Event": 5, - "Status": 0, - "BytesReceived": 42, - "FundsSpent": "0", - "Err": "string value" -} -``` - -### ClientStartDeal -ClientStartDeal proposes a deal with a miner. - - -Perms: admin - -Inputs: -```json -[ - { - "Data": { - "TransferType": "string value", - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceCid": null, - "PieceSize": 1024 - }, - "Wallet": "t01234", - "Miner": "t01234", - "EpochPrice": "0", - "MinBlocksDuration": 42, - "ProviderCollateral": "0", - "DealStartEpoch": 10101, - "FastRetrieval": true, - "VerifiedDeal": true - } -] -``` - -Response: `null` - -## Gas - - -### GasEstimateFeeCap -GasEstimateFeeCap estimates gas fee cap - - -Perms: read - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### GasEstimateGasLimit -GasEstimateGasLimit estimates gas used by the message and returns it. -It fails if message fails to execute. - - -Perms: read - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `9` - -### GasEstimateGasPremium -GasEstimateGasPremium estimates what gas price should be used for a -message to have high likelihood of inclusion in `nblocksincl` epochs. 
- - -Perms: read - -Inputs: -```json -[ - 42, - "t01234", - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### GasEstimateMessageGas -GasEstimateMessageGas estimates gas values for unset message gas fields - - -Perms: read - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - { - "MaxFee": "0" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" -} -``` - -## I - - -### ID - - -Perms: read - -Inputs: `null` - -Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` - -## Log - - -### LogList - - -Perms: write - -Inputs: `null` - -Response: `null` - -### LogSetLevel - - -Perms: write - -Inputs: -```json -[ - "string value", - "string value" -] -``` - -Response: `{}` - -## Market - - -### MarketEnsureAvailable -MarketFreeBalance - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - "0" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -## Miner - - -### MinerCreateBlock -There are not yet any comments for this method. - -Perms: write - -Inputs: -```json -[ - { - "Miner": "t01234", - "Parents": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - "Ticket": { - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "Eproof": { - "WinCount": 9, - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "BeaconValues": null, - "Messages": null, - "Epoch": 10101, - "Timestamp": 42, - "WinningPoStProof": null - } -] -``` - -Response: -```json -{ - "Header": { - "Miner": "t01234", - "Ticket": { - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "ElectionProof": { - "WinCount": 9, - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, - "ParentWeight": "0", - "Height": 10101, - "ParentStateRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ParentMessageReceipts": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Messages": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "BLSAggregate": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Timestamp": 42, - "BlockSig": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ForkSignaling": 42, - "ParentBaseFee": "0" - }, - "BlsMessages": null, - "SecpkMessages": null -} -``` - -### MinerGetBaseInfo -There are not yet any comments for this method. 
- -Perms: read - -Inputs: -```json -[ - "t01234", - 10101, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "MinerPower": "0", - "NetworkPower": "0", - "Sectors": null, - "WorkerKey": "t01234", - "SectorSize": 34359738368, - "PrevBeaconEntry": { - "Round": 42, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "BeaconEntries": null, - "HasMinPower": true -} -``` - -## Mpool -The Mpool methods are for interacting with the message pool. The message pool -manages all incoming and outgoing 'messages' going over the network. - - -### MpoolClear -MpoolClear clears pending messages from the mpool - - -Perms: write - -Inputs: -```json -[ - true -] -``` - -Response: `{}` - -### MpoolGetConfig -MpoolGetConfig returns (a copy of) the current mpool config - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "PriorityAddrs": null, - "SizeLimitHigh": 123, - "SizeLimitLow": 123, - "ReplaceByFeeRatio": 12.3, - "PruneCooldown": 60000000000, - "GasLimitOverestimation": 12.3 -} -``` - -### MpoolGetNonce -MpoolGetNonce gets next nonce for the specified sender. -Note that this method may not be atomic. Use MpoolPushMessage instead. - - -Perms: read - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `42` - -### MpoolPending -MpoolPending returns pending mempool messages. - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### MpoolPush -MpoolPush pushes a signed message to mempool. - - -Perms: write - -Inputs: -```json -[ - { - "Message": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MpoolPushMessage -MpoolPushMessage atomically assigns a nonce, signs, and pushes a message -to mempool. 
-maxFee is only used when GasFeeCap/GasPremium fields aren't specified - -When maxFee is set to 0, MpoolPushMessage will guess appropriate fee -based on current chain conditions - - -Perms: sign - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - { - "MaxFee": "0" - } -] -``` - -Response: -```json -{ - "Message": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -} -``` - -### MpoolSelect -MpoolSelect returns a list of pending messages for inclusion in the next block - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - 12.3 -] -``` - -Response: `null` - -### MpoolSetConfig -MpoolSetConfig sets the mpool config to (a copy of) the supplied config - - -Perms: write - -Inputs: -```json -[ - { - "PriorityAddrs": null, - "SizeLimitHigh": 123, - "SizeLimitLow": 123, - "ReplaceByFeeRatio": 12.3, - "PruneCooldown": 60000000000, - "GasLimitOverestimation": 12.3 - } -] -``` - -Response: `{}` - -### MpoolSub -There are not yet any comments for this method. - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Type": 0, - "Message": { - "Message": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } -} -``` - -## Msig -The Msig methods are used to interact with multisig wallets on the -filecoin network - - -### MsigAddApprove -MsigAddApprove approves a previously proposed AddSigner message -It takes the following params: , , , -, , - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - 42, - "t01234", - "t01234", - true -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigAddCancel -MsigAddCancel cancels a previously proposed AddSigner message -It takes the following params: , , , -, - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - 42, - "t01234", - true -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigAddPropose -MsigAddPropose proposes adding a signer in the multisig -It takes the following params: , , -, - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - "t01234", - true -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigApprove -MsigApprove approves a previously-proposed multisig message -It takes the following params: , , , , , -, , - - -Perms: sign - -Inputs: -```json -[ - "t01234", - 42, - "t01234", - "t01234", - "0", - "t01234", - 42, - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigCancel -MsigCancel cancels a previously-proposed multisig message -It takes the following params: , , , , -, , - - -Perms: sign - -Inputs: -```json -[ - "t01234", - 42, - "t01234", - "0", - "t01234", - 42, - 
"Ynl0ZSBhcnJheQ==" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigCreate -MsigCreate creates a multisig wallet -It takes the following params: , , -, , - - -Perms: sign - -Inputs: -```json -[ - 42, - null, - 10101, - "0", - "t01234", - "0" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigGetAvailableBalance -MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### MsigGetVested -MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. -It takes the following params: , , - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### MsigPropose -MsigPropose proposes a multisig message -It takes the following params: , , , -, , - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - "0", - "t01234", - 42, - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigSwapApprove -MsigSwapApprove approves a previously proposed SwapSigner -It takes the following params: , , , -, , - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - 42, - "t01234", - "t01234", - "t01234" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigSwapCancel -MsigSwapCancel cancels a previously proposed SwapSigner message -It takes the following params: , , , -, - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - 42, - "t01234", - "t01234" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### MsigSwapPropose -MsigSwapPropose proposes swapping 2 signers in the multisig -It takes the following params: , , -, - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - "t01234", - "t01234" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -## Net - - -### NetAddrsListen - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -} -``` - -### NetAgentVersion - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `"string value"` - -### NetAutoNatStatus - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Reachability": 1, - "PublicAddr": "string value" -} -``` - -### NetBandwidthStats - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "TotalIn": 9, - "TotalOut": 9, - "RateIn": 12.3, - "RateOut": 12.3 -} -``` - -### NetBandwidthStatsByPeer - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { - "TotalIn": 174000, - "TotalOut": 12500, - "RateIn": 100, 
- "RateOut": 50 - } -} -``` - -### NetBandwidthStatsByProtocol - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "/fil/hello/1.0.0": { - "TotalIn": 174000, - "TotalOut": 12500, - "RateIn": 100, - "RateOut": 50 - } -} -``` - -### NetConnect - - -Perms: write - -Inputs: -```json -[ - { - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" - } -] -``` - -Response: `{}` - -### NetConnectedness - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `1` - -### NetDisconnect - - -Perms: write - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: `{}` - -### NetFindPeer - - -Perms: read - -Inputs: -```json -[ - "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -] -``` - -Response: -```json -{ - "Addrs": null, - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" -} -``` - -### NetPeers - - -Perms: read - -Inputs: `null` - -Response: `null` - -### NetPubsubScores - - -Perms: read - -Inputs: `null` - -Response: `null` - -## Paych -The Paych methods are for interacting with and managing payment channels - - -### PaychAllocateLane -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `42` - -### PaychAvailableFunds -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234" -] -``` - -Response: -```json -{ - "Channel": "\u003cempty\u003e", - "From": "t01234", - "To": "t01234", - "ConfirmedAmt": "0", - "PendingAmt": "0", - "PendingWaitSentinel": null, - "QueuedAmt": "0", - "VoucherReedeemedAmt": "0" -} -``` - -### PaychAvailableFundsByFromTo -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234" -] -``` - -Response: -```json -{ - "Channel": "\u003cempty\u003e", - "From": "t01234", - "To": "t01234", - "ConfirmedAmt": "0", - "PendingAmt": "0", - "PendingWaitSentinel": null, - "QueuedAmt": "0", - "VoucherReedeemedAmt": "0" -} -``` - -### PaychCollect -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### PaychGet -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - "0" -] -``` - -Response: -```json -{ - "Channel": "t01234", - "WaitSentinel": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -} -``` - -### PaychGetWaitReady -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `"t01234"` - -### PaychList -There are not yet any comments for this method. - -Perms: read - -Inputs: `null` - -Response: `null` - -### PaychNewPayment -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234", - "t01234", - null -] -``` - -Response: -```json -{ - "Channel": "t01234", - "WaitSentinel": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Vouchers": null -} -``` - -### PaychSettle -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -### PaychStatus -There are not yet any comments for this method. 
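PaychStatus has no generated comment either; it takes a single payment channel address. A hedged sketch against the default local endpoint (the address is a placeholder; the `lotus paych status` CLI command surfaces similar information):

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.PaychStatus", "params": ["t01234"], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```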
- -Perms: read - -Inputs: -```json -[ - "t01234" -] -``` - -Response: -```json -{ - "ControlAddr": "t01234", - "Direction": 1 -} -``` - -### PaychVoucherAdd -There are not yet any comments for this method. - -Perms: write - -Inputs: -```json -[ - "t01234", - { - "ChannelAddr": "t01234", - "TimeLockMin": 10101, - "TimeLockMax": 10101, - "SecretPreimage": "Ynl0ZSBhcnJheQ==", - "Extra": { - "Actor": "t01234", - "Method": 1, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Lane": 42, - "Nonce": 42, - "Amount": "0", - "MinSettleHeight": 10101, - "Merges": null, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "Ynl0ZSBhcnJheQ==", - "0" -] -``` - -Response: `"0"` - -### PaychVoucherCheckSpendable -There are not yet any comments for this method. - -Perms: read - -Inputs: -```json -[ - "t01234", - { - "ChannelAddr": "t01234", - "TimeLockMin": 10101, - "TimeLockMax": 10101, - "SecretPreimage": "Ynl0ZSBhcnJheQ==", - "Extra": { - "Actor": "t01234", - "Method": 1, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Lane": 42, - "Nonce": 42, - "Amount": "0", - "MinSettleHeight": 10101, - "Merges": null, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "Ynl0ZSBhcnJheQ==", - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: `true` - -### PaychVoucherCheckValid -There are not yet any comments for this method. - -Perms: read - -Inputs: -```json -[ - "t01234", - { - "ChannelAddr": "t01234", - "TimeLockMin": 10101, - "TimeLockMax": 10101, - "SecretPreimage": "Ynl0ZSBhcnJheQ==", - "Extra": { - "Actor": "t01234", - "Method": 1, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Lane": 42, - "Nonce": 42, - "Amount": "0", - "MinSettleHeight": 10101, - "Merges": null, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - } -] -``` - -Response: `{}` - -### PaychVoucherCreate -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234", - "0", - 42 -] -``` - -Response: -```json -{ - "Voucher": { - "ChannelAddr": "t01234", - "TimeLockMin": 10101, - "TimeLockMax": 10101, - "SecretPreimage": "Ynl0ZSBhcnJheQ==", - "Extra": { - "Actor": "t01234", - "Method": 1, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Lane": 42, - "Nonce": 42, - "Amount": "0", - "MinSettleHeight": 10101, - "Merges": null, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "Shortfall": "0" -} -``` - -### PaychVoucherList -There are not yet any comments for this method. - -Perms: write - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `null` - -### PaychVoucherSubmit -There are not yet any comments for this method. - -Perms: sign - -Inputs: -```json -[ - "t01234", - { - "ChannelAddr": "t01234", - "TimeLockMin": 10101, - "TimeLockMax": 10101, - "SecretPreimage": "Ynl0ZSBhcnJheQ==", - "Extra": { - "Actor": "t01234", - "Method": 1, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Lane": 42, - "Nonce": 42, - "Amount": "0", - "MinSettleHeight": 10101, - "Merges": null, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } - }, - "Ynl0ZSBhcnJheQ==", - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: -```json -{ - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" -} -``` - -## State -The State methods are used to query, inspect, and interact with chain state. -All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. -A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. 
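For example, a State method can be run against the current head by passing `null` in place of the tipset key. A minimal sketch querying a miner's power that way (the miner address is a placeholder and the default local endpoint is assumed):

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateMinerPower", "params": ["t01234", null], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```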
- - -### StateAccountKey -StateAccountKey returns the public key address of the given ID address - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"t01234"` - -### StateAllMinerFaults -StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset - - -Perms: read - -Inputs: -```json -[ - 10101, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateCall -StateCall runs the given message and returns its result without any persisted changes. - - -Perms: read - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Msg": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "MsgRct": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "ExecutionTrace": { - "Msg": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "MsgRct": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "Error": "string value", - "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null - }, - "Error": "string value", - "Duration": 60000000000 -} -``` - -### StateChangedActors -StateChangedActors returns all the actors whose states change between the two given state CIDs -TODO: Should this take tipset keys instead? - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "t01236": { - "Code": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Head": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Nonce": 42, - "Balance": "0" - } -} -``` - -### StateCirculatingSupply -StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "FilVested": "0", - "FilMined": "0", - "FilBurnt": "0", - "FilLocked": "0", - "FilCirculating": "0" -} -``` - -### StateCompute -StateCompute is a flexible command that applies the given messages on the given tipset. -The messages are run as though the VM were at the provided height. 
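A hedged sketch of a StateCompute dry run: no extra messages are applied (`null` message list) at a placeholder height, with `null` for the tipset key to target the current head:

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateCompute", "params": [10101, null, null], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```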
- - -Perms: read - -Inputs: -```json -[ - 10101, - null, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Trace": null -} -``` - -### StateDealProviderCollateralBounds -StateDealProviderCollateralBounds returns the min and max collateral a storage provider -can issue. It takes the deal size and verified status as parameters. - - -Perms: read - -Inputs: -```json -[ - 1032, - true, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Min": "0", - "Max": "0" -} -``` - -### StateGetActor -StateGetActor returns the indicated actor's nonce and balance. - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Code": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Head": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Nonce": 42, - "Balance": "0" -} -``` - -### StateGetReceipt -StateGetReceipt returns the message receipt for the given message - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 -} -``` - -### StateListActors -StateListActors returns the addresses of every actor in the state - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateListMessages -StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. 
- - -Perms: read - -Inputs: -```json -[ - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - 10101 -] -``` - -Response: `null` - -### StateListMiners -StateListMiners returns the addresses of every miner that has claimed power in the Power Actor - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateLookupID -StateLookupID retrieves the ID address of the given address - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"t01234"` - -### StateMarketBalance -StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Escrow": "0", - "Locked": "0" -} -``` - -### StateMarketDeals -StateMarketDeals returns information about every deal in the Storage Market - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "t026363": { - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "t01234", - "Provider": "t01234", - "Label": "string value", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "State": { - "SectorStartEpoch": 10101, - "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101 - } - } -} -``` - -### StateMarketParticipants -StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "t026363": { - "Escrow": "0", - "Locked": "0" - } -} -``` - -### StateMarketStorageDeal -StateMarketStorageDeal returns information about the indicated deal - - -Perms: read - -Inputs: -```json -[ - 5432, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Proposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "t01234", - "Provider": "t01234", - "Label": "string value", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "State": { - 
"SectorStartEpoch": 10101, - "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101 - } -} -``` - -### StateMinerActiveSectors -StateMinerActiveSectors returns info about sectors that a given miner is actively proving. - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateMinerAvailableBalance -StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### StateMinerDeadlines -StateMinerDeadlines returns all the proving deadlines for the given miner - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateMinerFaults -StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -[ - 5, - 1 -] -``` - -### StateMinerInfo -StateMinerInfo returns info about the indicated miner - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Owner": "t01234", - "Worker": "t01234", - "NewWorker": "t01234", - "ControlAddresses": null, - "WorkerChangeEpoch": 10101, - "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Multiaddrs": null, - "SealProofType": 3, - "SectorSize": 34359738368, - "WindowPoStPartitionSectors": 42 -} -``` - -### StateMinerInitialPledgeCollateral -StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector - - -Perms: read - -Inputs: -```json -[ - "t01234", - { - "SealProof": 3, - "SectorNumber": 9, - "SealedCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "SealRandEpoch": 10101, - "DealIDs": null, - "Expiration": 10101, - "ReplaceCapacity": true, - "ReplaceSectorDeadline": 42, - "ReplaceSectorPartition": 42, - "ReplaceSectorNumber": 9 - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### StateMinerPartitions -StateMinerPartitions loads miner partitions for the specified miner/deadline - - -Perms: read - -Inputs: -```json -[ - "t01234", - 42, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateMinerPower -StateMinerPower returns the power of the indicated miner - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": 
"bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "MinerPower": { - "RawBytePower": "0", - "QualityAdjPower": "0" - }, - "TotalPower": { - "RawBytePower": "0", - "QualityAdjPower": "0" - } -} -``` - -### StateMinerPreCommitDepositForPower -StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector - - -Perms: read - -Inputs: -```json -[ - "t01234", - { - "SealProof": 3, - "SectorNumber": 9, - "SealedCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "SealRandEpoch": 10101, - "DealIDs": null, - "Expiration": 10101, - "ReplaceCapacity": true, - "ReplaceSectorDeadline": 42, - "ReplaceSectorPartition": 42, - "ReplaceSectorNumber": 9 - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### StateMinerProvingDeadline -StateMinerProvingDeadline calculates the deadline at some epoch for a proving period -and returns the deadline-related calculations. - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "CurrentEpoch": 10101, - "PeriodStart": 10101, - "Index": 42, - "Open": 10101, - "Close": 10101, - "Challenge": 10101, - "FaultCutoff": 10101, - "WPoStPeriodDeadlines": 42, - "WPoStProvingPeriod": 10101, - "WPoStChallengeWindow": 10101, - "WPoStChallengeLookback": 10101, - "FaultDeclarationCutoff": 10101 -} -``` - -### StateMinerRecoveries -StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -[ - 5, - 1 -] -``` - -### StateMinerSectorCount -StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Sectors": 42, - "Active": 42 -} -``` - -### StateMinerSectors -StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. -If the filterOut boolean is set to true, any sectors in the filter are excluded. -If false, only those sectors in the filter are included. 
- - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - 0 - ], - true, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `null` - -### StateMsgGasCost -StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Message": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "GasUsed": "0", - "BaseFeeBurn": "0", - "OverEstimationBurn": "0", - "MinerPenalty": "0", - "MinerTip": "0", - "Refund": "0", - "TotalCost": "0" -} -``` - -### StateNetworkName -StateNetworkName returns the name of the network the node is synced to - - -Perms: read - -Inputs: `null` - -Response: `"lotus"` - -### StateReadState -StateReadState returns the indicated actor's state. - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Balance": "0", - "State": {} -} -``` - -### StateReplay -StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. - - -Perms: read - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Msg": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "MsgRct": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "ExecutionTrace": { - "Msg": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "MsgRct": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "Error": "string value", - "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null - }, - "Error": "string value", - "Duration": 60000000000 -} -``` - -### StateSearchMsg -StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: -```json -{ - "Message": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Receipt": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "ReturnDec": {}, - "TipSet": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - "Height": 10101 -} -``` - -### StateSectorExpiration -StateSectorExpiration returns epoch at which given sector will expire - - -Perms: read - 
-Inputs: -```json -[ - "t01234", - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "OnTime": 10101, - "Early": 10101 -} -``` - -### StateSectorGetInfo -StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found -NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate -expiration epoch - - -Perms: read - -Inputs: -```json -[ - "t01234", - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "SectorNumber": 9, - "SealProof": 3, - "SealedCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DealIDs": null, - "Activation": 10101, - "Expiration": 10101, - "DealWeight": "0", - "VerifiedDealWeight": "0", - "InitialPledge": "0", - "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" -} -``` - -### StateSectorPartition -StateSectorPartition finds deadline/partition with the specified sector - - -Perms: read - -Inputs: -```json -[ - "t01234", - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Deadline": 42, - "Partition": 42 -} -``` - -### StateSectorPreCommitInfo -StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector - - -Perms: read - -Inputs: -```json -[ - "t01234", - 9, - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: -```json -{ - "Info": { - "SealProof": 3, - "SectorNumber": 9, - "SealedCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "SealRandEpoch": 10101, - "DealIDs": null, - "Expiration": 10101, - "ReplaceCapacity": true, - "ReplaceSectorDeadline": 42, - "ReplaceSectorPartition": 42, - "ReplaceSectorNumber": 9 - }, - "PreCommitDeposit": "0", - "PreCommitEpoch": 10101, - "DealWeight": "0", - "VerifiedDealWeight": "0" -} -``` - -### StateVerifiedClientStatus -StateVerifiedClientStatus returns the data cap for the given address. -Returns nil if there is no entry in the data cap table for the -address. - - -Perms: read - -Inputs: -```json -[ - "t01234", - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `"0"` - -### StateWaitMsg -StateWaitMsg looks back in the chain for a message. If not found, it blocks until the -message arrives on chain, and gets to the indicated confidence depth. 
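Because StateWaitMsg blocks until the message lands and reaches the requested confidence, a call like the hedged sketch below may not return for several epochs (the message CID is the documentation placeholder and the confidence value is arbitrary):

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateWaitMsg", "params": [{ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, 5], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```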
- - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - 42 -] -``` - -Response: -```json -{ - "Message": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Receipt": { - "ExitCode": 0, - "Return": "Ynl0ZSBhcnJheQ==", - "GasUsed": 9 - }, - "ReturnDec": {}, - "TipSet": [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ], - "Height": 10101 -} -``` - -## Sync -The Sync method group contains methods for interacting with and -observing the lotus sync service. - - -### SyncCheckBad -SyncCheckBad checks if a block was marked as bad, and if it was, returns -the reason. - - -Perms: read - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `"string value"` - -### SyncCheckpoint -SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it. - - -Perms: admin - -Inputs: -```json -[ - [ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - { - "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" - } - ] -] -``` - -Response: `{}` - -### SyncIncomingBlocks -SyncIncomingBlocks returns a channel streaming incoming, potentially not -yet synced block headers. - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "Miner": "t01234", - "Ticket": { - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "ElectionProof": { - "WinCount": 9, - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, - "ParentWeight": "0", - "Height": 10101, - "ParentStateRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ParentMessageReceipts": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Messages": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "BLSAggregate": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Timestamp": 42, - "BlockSig": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ForkSignaling": 42, - "ParentBaseFee": "0" -} -``` - -### SyncMarkBad -SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced. -Use with extreme caution. - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `{}` - -### SyncState -SyncState returns the current status of the lotus sync system. - - -Perms: read - -Inputs: `null` - -Response: -```json -{ - "ActiveSyncs": null -} -``` - -### SyncSubmitBlock -SyncSubmitBlock can be used to submit a newly created block to the. 
-network through this node - - -Perms: write - -Inputs: -```json -[ - { - "Header": { - "Miner": "t01234", - "Ticket": { - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "ElectionProof": { - "WinCount": 9, - "VRFProof": "Ynl0ZSBhcnJheQ==" - }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, - "ParentWeight": "0", - "Height": 10101, - "ParentStateRoot": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "ParentMessageReceipts": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Messages": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "BLSAggregate": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "Timestamp": 42, - "BlockSig": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - }, - "ForkSignaling": 42, - "ParentBaseFee": "0" - }, - "BlsMessages": null, - "SecpkMessages": null - } -] -``` - -Response: `{}` - -### SyncUnmarkBad -SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again. - - -Perms: admin - -Inputs: -```json -[ - { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } -] -``` - -Response: `{}` - -## Wallet - - -### WalletBalance -WalletBalance returns the balance of the given address at the current head of the chain. - - -Perms: read - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `"0"` - -### WalletDefaultAddress -WalletDefaultAddress returns the address marked as default in the wallet. - - -Perms: write - -Inputs: `null` - -Response: `"t01234"` - -### WalletDelete -WalletDelete deletes an address from the wallet. - - -Perms: write - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `{}` - -### WalletExport -WalletExport returns the private key of an address in the wallet. - - -Perms: admin - -Inputs: -```json -[ - "t01234" -] -``` - -Response: -```json -{ - "Type": "string value", - "PrivateKey": "Ynl0ZSBhcnJheQ==" -} -``` - -### WalletHas -WalletHas indicates whether the given address is in the wallet. - - -Perms: write - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `true` - -### WalletImport -WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. - - -Perms: admin - -Inputs: -```json -[ - { - "Type": "string value", - "PrivateKey": "Ynl0ZSBhcnJheQ==" - } -] -``` - -Response: `"t01234"` - -### WalletList -WalletList lists all the addresses in the wallet. - - -Perms: write - -Inputs: `null` - -Response: `null` - -### WalletNew -WalletNew creates a new address in the wallet with the given sigType. - - -Perms: write - -Inputs: -```json -[ - 2 -] -``` - -Response: `"t01234"` - -### WalletSetDefault -WalletSetDefault marks the given address as as the default one. - - -Perms: admin - -Inputs: -```json -[ - "t01234" -] -``` - -Response: `{}` - -### WalletSign -WalletSign signs the given bytes using the given address. - - -Perms: sign - -Inputs: -```json -[ - "t01234", - "Ynl0ZSBhcnJheQ==" -] -``` - -Response: -```json -{ - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" -} -``` - -### WalletSignMessage -WalletSignMessage signs the given message using the given address. 
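Like WalletSign, this is a sign-permission method, so the request needs a token with at least `sign` permissions. A hedged sketch reusing the placeholder message from these docs (the address, the message fields and the `~/.lotus/token` path are illustrative assumptions):

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $(cat ~/.lotus/token)" \
  --data '{ "jsonrpc": "2.0", "method": "Filecoin.WalletSignMessage", "params": ["t01234", { "Version": 42, "To": "t01234", "From": "t01234", "Nonce": 42, "Value": "0", "GasLimit": 9, "GasFeeCap": "0", "GasPremium": "0", "Method": 1, "Params": "Ynl0ZSBhcnJheQ==" }], "id": 1 }' \
  'http://127.0.0.1:1234/rpc/v0'
```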
- - -Perms: sign - -Inputs: -```json -[ - "t01234", - { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - } -] -``` - -Response: -```json -{ - "Message": { - "Version": 42, - "To": "t01234", - "From": "t01234", - "Nonce": 42, - "Value": "0", - "GasLimit": 9, - "GasFeeCap": "0", - "GasPremium": "0", - "Method": 1, - "Params": "Ynl0ZSBhcnJheQ==" - }, - "Signature": { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -} -``` - -### WalletVerify -WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. -The address does not have to be in the wallet. - - -Perms: read - -Inputs: -```json -[ - "t01234", - "Ynl0ZSBhcnJheQ==", - { - "Type": 2, - "Data": "Ynl0ZSBhcnJheQ==" - } -] -``` - -Response: `true` - diff --git a/documentation/en/building/api-troubleshooting.md b/documentation/en/building/api-troubleshooting.md deleted file mode 100644 index 0cb3a6800..000000000 --- a/documentation/en/building/api-troubleshooting.md +++ /dev/null @@ -1,36 +0,0 @@ -# API Troubleshooting - -## Types: params - -`params` must be an array. If there are no `params` you should still pass an empty array. - -## Types: TipSet - -For methods such as `Filecoin.StateMinerPower`, where the method accepts the argument of the type `TipSet`, you can pass `null` to use the current chain head. - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - --data '{ "jsonrpc": "2.0", "method": "Filecoin.StateMinerPower", "params": ["t0101", null], "id": 3 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` - -## Types: Sending a CID - -If you do not serialize the CID as a [JSON IPLD link](https://did-ipid.github.io/ipid-did-method/#txref), you will receive an error. Here is an example of a broken CURL request: - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": ["bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"], "id": 0 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` - -To fix it, change the `params` property to: - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - --data '{ "jsonrpc": "2.0", "method":"Filecoin.ClientGetDealInfo", "params": [{"/": "bafyreiaxl446wlnu6t6dpq4ivrjf4gda4gvsoi4rr6mpxau7z25xvk5pl4"}], "id": 0 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` diff --git a/documentation/en/building/api.md b/documentation/en/building/api.md deleted file mode 100644 index 3a2c2902b..000000000 --- a/documentation/en/building/api.md +++ /dev/null @@ -1,38 +0,0 @@ -# API endpoints and methods - -The API can be accessed on: - -- `http://[api:port]/rpc/v0` - HTTP RPC-API endpoint -- `ws://[api:port]/rpc/v0` - Websocket RPC-API endpoint -- `PUT http://[api:port]/rest/v0/import` - REST endpoint for file import (multipart upload). It requires write permissions. - -The RPC methods can be found in the [Reference](en+api-methods) and directly in the source code: - -- [Both Lotus node + miner APIs](https://github.com/filecoin-project/lotus/blob/master/api/api_common.go) -- [Lotus node API](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go) -- [Lotus miner API](https://github.com/filecoin-project/lotus/blob/master/api/api_storage.go) - - -## JSON-RPC client - -Lotus uses its own Go library implementation of [JSON-RPC](https://github.com/filecoin-project/go-jsonrpc). 
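Whichever client is used, it needs the node's RPC address and a token. A hedged sketch of reading both from a full node's repo (the paths assume the default `~/.lotus` location; the miner keeps equivalent files under its own repo):

```sh
# multiaddr of the local RPC endpoint
cat ~/.lotus/api
# default API token for this node
cat ~/.lotus/token
```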
- -## cURL example - -To demonstrate making an API request, we will take the method `ChainHead` from [api/api_full.go](https://github.com/filecoin-project/lotus/blob/master/api/api_full.go). - -```go -ChainHead(context.Context) (*types.TipSet, error) -``` - -And create a CURL command. In this command, `ChainHead` is included as `{ "method": "Filecoin.ChainHead" }`: - -```sh -curl -X POST \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $(cat ~/.lotusminer/token)" \ - --data '{ "jsonrpc": "2.0", "method": "Filecoin.ChainHead", "params": [], "id": 3 }' \ - 'http://127.0.0.1:1234/rpc/v0' -``` - -(See [this section](en+remote-api) to learn how to generate authorization tokens). diff --git a/documentation/en/building/building.md b/documentation/en/building/building.md deleted file mode 100644 index 5194f8314..000000000 --- a/documentation/en/building/building.md +++ /dev/null @@ -1,5 +0,0 @@ -# Building with Lotus - -Lotus applications provide HTTP (JSON-RPC) APIs that allow developers to control Lotus programatically. - -This section dives into how to setup and use these APIs, additionally providing information on advanced Lotus features and workflows, like Payment Channels or how to setup a fully local Lotus development network. diff --git a/documentation/en/building/jaeger-tracing.md b/documentation/en/building/jaeger-tracing.md deleted file mode 100644 index bbe4d3052..000000000 --- a/documentation/en/building/jaeger-tracing.md +++ /dev/null @@ -1,26 +0,0 @@ -# Jaeger Tracing - -Lotus has tracing built into many of its internals. To view the traces, first download [Jaeger](https://www.jaegertracing.io/download/) (Choose the 'all-in-one' binary). Then run it somewhere, start up the lotus daemon, and open up localhost:16686 in your browser. - -## Open Census - -Lotus uses [OpenCensus](https://opencensus.io/) for tracing application flow. This generates spans through the execution of annotated code paths. - -Currently it is set up to use Jaeger, though other tracing backends should be fairly easy to swap in. - -## Running Locally - -To easily run and view tracing locally, first, install jaeger. The easiest way to do this is to [download the binaries](https://www.jaegertracing.io/download/) and then run the `jaeger-all-in-one` binary. This will start up jaeger, listen for spans on `localhost:6831`, and expose a web UI for viewing traces on `http://localhost:16686/`. - -Now, to start sending traces from Lotus to Jaeger, set the environment variable `LOTUS_JAEGER` to `localhost:6831`, and start the `lotus daemon`. - -Now, to view any generated traces, open up `http://localhost:16686/` in your browser. - -## Adding Spans - -To annotate a new codepath with spans, add the following lines to the top of the function you wish to trace: - -```go -ctx, span := trace.StartSpan(ctx, "put function name here") -defer span.End() -``` diff --git a/documentation/en/building/local-devnet.md b/documentation/en/building/local-devnet.md deleted file mode 100644 index 3382b6471..000000000 --- a/documentation/en/building/local-devnet.md +++ /dev/null @@ -1,54 +0,0 @@ -# Setup Local Devnet - -Build the Lotus Binaries in debug mode, This enables the use of 2048 byte sectors. - -```sh -make 2k -``` - -Set the `LOTUS_SKIP_GENESIS_CHECK` environment variable to `_yes_`. This tells your -Lotus node that it's okay if the genesis being used doesn't match any baked-in -genesis. 
- -```sh -export LOTUS_SKIP_GENESIS_CHECK=_yes_ -``` - -Download the 2048 byte parameters: -```sh -./lotus fetch-params 2048 -``` - -Pre-seal some sectors: - -```sh -./lotus-seed pre-seal --sector-size 2KiB --num-sectors 2 -``` - -Create the genesis block and start up the first node: - -```sh -./lotus-seed genesis new localnet.json -./lotus-seed genesis add-miner localnet.json ~/.genesis-sectors/pre-seal-t01000.json -./lotus daemon --lotus-make-genesis=devgen.car --genesis-template=localnet.json --bootstrap=false -``` - -Then, in another console, import the genesis miner key: - -```sh -./lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key -``` - -Set up the genesis miner: - -```sh -./lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync -``` - -Now, finally, start up the miner: - -```sh -./lotus-miner run --nosync -``` - -If all went well, you will have your own local Lotus Devnet running. diff --git a/documentation/en/building/payment-channels.md b/documentation/en/building/payment-channels.md deleted file mode 100644 index afddcdc40..000000000 --- a/documentation/en/building/payment-channels.md +++ /dev/null @@ -1,111 +0,0 @@ -# Payment Channels - -Payment channels are used to transfer funds between two actors. - -For example in lotus a payment channel is created when a client wants to fetch data from a provider. -The client sends vouchers for the payment channel, and the provider sends data in response. - -The payment channel is created on-chain with an initial amount. -Vouchers allow the client and the provider to exchange funds incrementally off-chain. -The provider can submit vouchers to chain at any stage. -Either party to the payment channel can settle the payment channel on chain. -After a settlement period (currently 12 hours) either party to the payment channel can call collect on chain. -Collect sends the value of submitted vouchers to the channel recipient (the provider), and refunds the remaining channel balance to the channel creator (the client). - -Vouchers have a lane, a nonce and a value, where vouchers with a higher nonce supersede vouchers with a lower nonce in the same lane. -Each deal is created on a different lane. - -Note that payment channels and vouchers can be used for any situation in which two parties need to incrementally transfer value between each other off-chain. - -## Using the CLI - -For example a client creates a payment channel to a provider with value 10 FIL. - -```sh -$ lotus paych add-funds 10 - -``` - -The client creates a voucher in lane 0 (implied) with nonce 1 (implied) and value 2. - -```sh -$ lotus paych voucher create 2 - -``` - -The client sends the voucher to the provider and the provider adds the voucher to their local store. - -```sh -$ lotus paych voucher add -``` - -The provider sends some data to the client. - -The client creates a voucher in lane 0 (implied) with nonce 2 (implied) and value 4. - -```sh -$ lotus paych voucher create 4 - -``` - -The client sends the voucher to the provider and the provider adds the voucher and sends back more data. -etc. - -The client can add value to the channel after it has been created by calling `paych add-funds` with the same client and provider addresses. - -```sh -$ lotus paych add-funds 5 - # Same address as above. Channel now has 15 -``` - -Once the client has received all their data, they may settle the channel. 
-Note that settlement doesn't have to be done immediately. -For example the client may keep the channel open as long as it wants to continue making deals with the provider. - -```sh -$ lotus paych settle -``` - -The provider can submit vouchers to chain (note that lotus does this automatically when it sees a settle message appear on chain). -The provider may have received many vouchers with incrementally higher values. -The provider should submit the best vouchers. Note that there will be one best voucher for each lane. - -```sh -$ lotus paych voucher best-spendable - - - - -$ lotus paych voucher submit -``` - -Once the settlement period is over, either the client or provider can call collect to disburse funds. - -```sh -$ lotus paych collect -``` - -Check the status of a channel that is still being created using `lotus paych status-by-from-to`. - -```sh -$ lotus paych status-by-from-to -Creating channel - From: t3sb6xzvs6rhlziatagevxpp3dwapdolurtkpn4kyh3kgoo4tn5o7lutjqlsnvpceztlhxu3lzzfe34rvpsjgq - To: t1zip4sblhyrn4oxygzsm6nafbsynp2avmk3xafea - Pending Amt: 10000 - Wait Sentinel: bafy2bzacedk2jidsyxcynusted35t5ipkhu2kpiodtwyjr3pimrhke6f5pqbm -``` - -Check the status of a channel that has been created using `lotus paych status`. - -```sh -$ lotus paych status -Channel exists - Channel: t2nydpzhmeqkmid5smtqnowlr2mr5az6rexpmyv6i - From: t3sb6xzvs6rhlziatagevxpp3dwapdolurtkpn4kyh3kgoo4tn5o7lutjqlsnvpceztlhxu3lzzfe34rvpsjgq - To: t1zip4sblhyrn4oxygzsm6nafbsynp2avmk3xafea - Confirmed Amt: 10000 - Pending Amt: 6000 - Queued Amt: 3000 - Voucher Redeemed Amt: 2000 -``` diff --git a/documentation/en/building/remote-api.md b/documentation/en/building/remote-api.md deleted file mode 100644 index d0fedb51b..000000000 --- a/documentation/en/building/remote-api.md +++ /dev/null @@ -1,69 +0,0 @@ -# Setting up remote API access - -The **Lotus Miner** and the **Lotus Node** applications come with their own local API endpoints setup by default when they are running. - -These endpoints are used by `lotus` and `lotus-miner` to interact with the running process. In this section we will explain how to enable remote access to the Lotus APIs. - -Note that instructions are the same for `lotus` and `lotus-miner`. For simplicity, we will just show how to do it with `lotus`. - -## Setting the listening interface for the API endpoint - -By default, the API listens on the local "loopback" interface (`127.0.0.1`). This is configured in the `config.toml` file: - -```toml -[API] -# ListenAddress = "/ip4/127.0.0.1/tcp/1234/http" -# RemoteListenAddress = "" -# Timeout = "30s" -``` - -To access the API remotely, Lotus needs to listen on the right IP/interface. The IP associated to each interface can be usually found with the command `ip a`. Once the right IP is known, it can be set in the configuration: - -```toml -[API] -ListenAddress = "/ip4//tcp/3453/http" # port is an example - -# Only relevant for lotus-miner -# This should be the IP:Port pair where the miner is reachable from anyone trying to dial to it. -# If you have placed a reverse proxy or a NAT'ing device in front of it, this may be different from -# the EXTERNAL_INTERFACE_IP. -RemoteListenAddress = "" -``` - -> `0.0.0.0` can be used too. This is a wildcard that means "all interfaces". Depending on the network setup, this may affect security (listening on the wrong, exposed interface). - -After making these changes, please restart the affected process. - -## Issuing tokens - -Any client wishing to talk to the API endpoints will need a token. 
Tokens can be generated with: - -```sh -lotus auth create-token --perm -``` - -(similarly for the Lotus Miner). - -The permissions work as follows: - -- `read` - Read node state, no private data. -- `write` - Write to local store / chain, and `read` permissions. -- `sign` - Use private keys stored in wallet for signing, `read` and `write` permissions. -- `admin` - Manage permissions, `read`, `write`, and `sign` permissions. - - -Tokens can then be used in applications by setting an Authorization header as: - -``` -Authorization: Bearer -``` - - -## Environment variables - -`lotus`, `lotus-miner` and `lotus-worker` can actually interact with their respective applications running on a different node. All is needed to configure them are the following the *environment variables*: - -```sh -FULLNODE_API_INFO="TOKEN:/ip4//tcp//http" -MINER_API_INFO="TOKEN:/ip4//tcp//http" -``` diff --git a/documentation/en/getting-started/getting-started.md b/documentation/en/getting-started/getting-started.md deleted file mode 100644 index 99b4095d4..000000000 --- a/documentation/en/getting-started/getting-started.md +++ /dev/null @@ -1,3 +0,0 @@ -# Getting started - -This section will get you started with Lotus. We will setup the Lotus daemon (that should already be [installed](en+install)), start it, create a wallet and use it to send and receive some Filecoin. diff --git a/documentation/en/getting-started/setup-troubleshooting.md b/documentation/en/getting-started/setup-troubleshooting.md deleted file mode 100644 index f27a3faa5..000000000 --- a/documentation/en/getting-started/setup-troubleshooting.md +++ /dev/null @@ -1,57 +0,0 @@ -# Setup Troubleshooting - - -## Error: initializing node error: cbor input had wrong number of fields - -This happens when you are starting Lotus which has been compiled for one network, but it encounters data in the Lotus data folder which is for a different network, or for an older incompatible version. - -The solution is to clear the data folder (see below). - -## Config: Clearing data - -Here is a command that will delete your chain data, stored wallets, stored data and any miners you have set up: - -```sh -rm -rf ~/.lotus ~/.lotusminer -``` - -Note you do not always need to clear your data for [updating](en+update). - -## Error: Failed to connect bootstrap peer - -```sh -WARN peermgr peermgr/peermgr.go:131 failed to connect to bootstrap peer: failed to dial : all dials failed - * [/ip4/147.75.80.17/tcp/1347] failed to negotiate security protocol: connected to wrong peer -``` - -- Try running the build steps again and make sure that you have the latest code from GitHub. - -```sh -ERROR hello hello/hello.go:81 other peer has different genesis! -``` - -- Try deleting your file system's `~/.lotus` directory. Check that it exists with `ls ~/.lotus`. - -```sh -- repo is already locked -``` - -- You already have another lotus daemon running. - -## Config: Open files limit - -Lotus will attempt to set up the file descriptor (FD) limit automatically. If that does not work, you can still configure your system to allow higher than the default values. - -On most systems you can check the open files limit with: - -```sh -ulimit -n -``` - -You can also modify this number by using the `ulimit` command. It gives you the ability to control the resources available for the shell or process started by it. 
If the number is below 10000, you can change it with the following command prior to starting the Lotus daemon:
-
-```sh
-ulimit -n 10000
-```
-
-Note that this is not persisted and that systemd manages its own FD limits for services. Please use your favourite search engine to find instructions on how to persist and configure FD limits for your system.
diff --git a/documentation/en/getting-started/setup.md b/documentation/en/getting-started/setup.md
deleted file mode 100644
index e751da80b..000000000
--- a/documentation/en/getting-started/setup.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# Setting up Lotus
-
-Your Lotus binaries have been installed and you are ready to start participating in the Filecoin network.
-
-## Selecting the right network
-
-You should have built the Lotus binaries from the right Github branch, and Lotus will be fully set up to join the matching [Filecoin network](https://docs.filecoin.io/how-to/networks/). For more information on switching networks, check the [updating Lotus section](en+update).
-
-## Starting the daemon
-
-To start the daemon simply run:
-
-```sh
-lotus daemon
-```
-
-or if you are using the provided systemd service files, do:
-
-```sh
-systemctl start lotus-daemon
-```
-
-__If you are using Lotus from China__, make sure you set the following environment variable before running Lotus:
-
-```
-export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
-```
-
-
-During the first start, Lotus:
-
-* Will set up its data folder at `~/.lotus`
-* Will download the necessary parameters
-* Will start syncing the Lotus chain
-
-If you started lotus using systemd, the logs will appear in `/var/log/lotus/daemon.log` (not in journalctl as usual), otherwise you will see them on your screen.
-
-Do not be appalled by the number of warnings and occasional errors showing in the logs; they are usually part of the normal functioning of the daemon in a distributed network.
-
-## Waiting to sync
-
-After the first start, the chain will start syncing until it has reached the tip. You can check how far the syncing process is with:
-
-```sh
-lotus sync status
-```
-
-You can also interactively wait for the chain to be fully synced with:
-
-```sh
-lotus sync wait
-```
-
-## Interacting with the Lotus daemon
-
-As shown above, the `lotus` command allows you to interact with the running daemon. You will see it getting used in many of the documentation examples.
-
-This command-line interface is self-documenting:
-
-```sh
-# Show general help
-lotus --help
-# Show specific help for the "client" subcommand
-lotus client --help
-```
-
-For example, after your Lotus daemon has been running for a few minutes, use `lotus` to check the number of other peers that it is connected to in the Filecoin network:
-
-```sh
-lotus net peers
-```
-
-## Controlling the logging level
-
-```sh
-lotus log set-level
-```
-This command can be used to toggle the logging levels of the different
-systems of a Lotus node. In decreasing order
-of logging detail, the levels are `debug`, `info`, `warn`, and `error`.
-
-As an example,
-to set the `chain` and `blocksync` systems to log at the `debug` level, run
-`lotus log set-level --system chain --system blocksync debug`.
-
-To see the various logging systems, run `lotus log list`.
-
-
-## Configuration
-
-### Configuration file
-
-The Lotus daemon stores a configuration file in `~/.lotus/config.toml`. Note that by default all settings are commented.
Here is an example configuration: - -```toml -[API] - # Binding address for the Lotus API - ListenAddress = "/ip4/127.0.0.1/tcp/1234/http" - # Not used by lotus daemon - RemoteListenAddress = "" - # General network timeout value - Timeout = "30s" - -# Libp2p provides connectivity to other Filecoin network nodes -[Libp2p] - # Binding address swarm - 0 means random port. - ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"] - # Insert any addresses you want to explicitally - # announce to other peers here. Otherwise, they are - # guessed. - AnnounceAddresses = [] - # Insert any addresses to avoid announcing here. - NoAnnounceAddresses = [] - # Connection manager settings, decrease if your - # machine is overwhelmed by connections. - ConnMgrLow = 150 - ConnMgrHigh = 180 - ConnMgrGrace = "20s" - -# Pubsub is used to broadcast information in the network -[Pubsub] - Bootstrapper = false - RemoteTracer = "/dns4/pubsub-tracer.filecoin.io/tcp/4001/p2p/QmTd6UvR47vUidRNZ1ZKXHrAFhqTJAD27rKL9XYghEKgKX" - -# This section can be used to enable adding and retriving files from IPFS -[Client] - UseIpfs = false - IpfsMAddr = "" - IpfsUseForRetrieval = false - -# Metrics configuration -[Metrics] - Nickname = "" - HeadNotifs = false -``` - -### Ensuring connectivity to your Lotus daemon - -Usually your lotus daemon will establish connectivity with others in the network and try to make itself diallable using uPnP. If you wish to manually ensure that your daemon is reachable: - -* Set a fixed port of your choice in the `ListenAddresses` in the Libp2p section (i.e. 6665). -* Open a port in your router that is forwarded to this port. This is usually called featured as "Port forwarding" and the instructions differ from router model to model but there are many guides online. -* Add your public IP/port to `AnnounceAddresses`. i.e. `/ip4//tcp/6665/`. - -Note that it is not a requirement to use Lotus as a client to the network to be fully reachable, as your node already connects to others directly. - - -### Environment variables - -Common to most Lotus binaries: - -* `LOTUS_FD_MAX`: Sets the file descriptor limit for the process -* `LOTUS_JAEGER`: Sets the Jaeger URL to send traces. See TODO. -* `LOTUS_DEV`: Any non-empty value will enable more verbose logging, useful only for developers. - -Specific to the *Lotus daemon*: - -* `LOTUS_PATH`: Location to store Lotus data (defaults to `~/.lotus`). -* `LOTUS_SKIP_GENESIS_CHECK=_yes_`: Set only if you wish to run a lotus network with a different genesis block. -* `LOTUS_CHAIN_TIPSET_CACHE`: Sets the size for the chainstore tipset cache. Defaults to `8192`. Increase if you perform frequent arbitrary tipset lookups. -* `LOTUS_CHAIN_INDEX_CACHE`: Sets the size for the epoch index cache. Defaults to `32768`. Increase if you perform frequent deep chain lookups for block heights far from the latest height. -* `LOTUS_BSYNC_MSG_WINDOW`: Set the initial maximum window size for message fetching blocksync request. Set to 10-20 if you have an internet connection with low bandwidth. - -Specific to the *Lotus miner*: - -* `LOTUS_MINER_PATH`: Location for the miner's on-disk repo. Defaults to `./lotusminer`. -* A number of environment variables are respected for configuring the behaviour of the Filecoin proving subsystem. [See here](en+miner-setup). 
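As a quick illustration of how these variables are typically combined (a minimal sketch only; the mount point and cache size below are arbitrary example values, not recommendations):

```sh
# Example only: keep Lotus data on a dedicated disk and enlarge the tipset cache.
export LOTUS_PATH=/mnt/lotus               # hypothetical mount point
export LOTUS_CHAIN_TIPSET_CACHE=16384      # default is 8192, per the note above
lotus daemon
```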
-
-
diff --git a/documentation/en/getting-started/wallet.md b/documentation/en/getting-started/wallet.md
deleted file mode 100644
index 25a67fb09..000000000
--- a/documentation/en/getting-started/wallet.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Obtaining and sending FIL
-
-In order to receive and send FIL with Lotus, you will need to have installed the program and be running the Lotus daemon.
-
-## Creating a wallet
-
-
-```sh
-lotus wallet new bls
-```
-
-This will print your Filecoin address.
-
-Your wallet information is stored in the `~/.lotus/keystore` (or `$LOTUS_PATH/keystore`). For instructions on export/import, see below.
-
-You can create multiple wallets and list them with:
-
-```sh
-lotus wallet list
-```
-
-## Obtaining FIL
-
-FIL can be obtained either by using one of the Faucets (available for the test networks) or by buying it from an exchange supporting FIL trading (once mainnet has launched).
-
-Once you have received some FIL you can check your balance with:
-
-```sh
-lotus wallet balance
-```
-
-Remember that you will only see the latest balance when your daemon is fully synced to the chain.
-
-## Sending FIL
-
-Sending some FIL can be achieved by running:
-
-```sh
-lotus wallet send
-``` - -Make sure to check `lotus wallet send --help` for additional options. - -## Exporting and importing a wallet - -You can export and re-import a wallet with: - -```sh -lotus wallet export
> wallet.private
-```
-
-and:
-
-```sh
-lotus wallet import wallet.private
-```
-
-Keep your wallet's private key safe!
diff --git a/documentation/en/installation/install-linux.md b/documentation/en/installation/install-linux.md
deleted file mode 100644
index 6fe12996e..000000000
--- a/documentation/en/installation/install-linux.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Linux installation
-
-This page will show you the steps to build and install Lotus on your Linux computer.
-
-## Dependencies
-
-### System dependencies
-
-First of all, building Lotus will require installing some system dependencies, usually provided by your distribution.
-
-For Arch Linux:
-
-```sh
-sudo pacman -Syu opencl-icd-loader gcc git bzr jq pkg-config opencl-icd-loader opencl-headers
-```
-
-For Ubuntu:
-
-```sh
-sudo apt update
-sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl
-sudo apt upgrade
-```
-
-For Fedora:
-
-```sh
-sudo dnf -y update
-sudo dnf -y install gcc git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm
-```
-
-For OpenSUSE:
-
-```sh
-sudo zypper in gcc git jq make libOpenCL1 opencl-headers ocl-icd-devel clang llvm
-sudo ln -s /usr/lib64/libOpenCL.so.1 /usr/lib64/libOpenCL.so
-```
-
-### Rustup
-
-Lotus needs [rustup](https://rustup.rs/):
-
-```sh
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-```
-
-Please make sure your `$PATH` variable is correctly configured after the rustup installation so that `cargo` and `rustc` are found in their rustup-configured locations.
-
-### Go
-
-To build lotus you will need a working installation of **[Go1.14](https://golang.org/dl/)**. Follow the [installation instructions](https://golang.org/doc/install), which generally amount to:
-
-```sh
-# Example! Check the installation instructions.
-wget -c https://dl.google.com/go/go1.14.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
-```
-
-## Build and install Lotus
-
-With all the above, you are ready to build and install the Lotus suite (`lotus`, `lotus-miner` and `lotus-worker`):
-
-```sh
-git clone https://github.com/filecoin-project/lotus.git
-cd lotus/
-```
-
-__IF YOU ARE IN CHINA__, set `export GOPROXY=https://goproxy.cn` before building.
-
-Now, choose the network that you will be joining:
-
-* For `testnet`: `git checkout master`
-* For `nerpa`: `git checkout ntwk-nerpa`
-* For `butterfly`: `git checkout ntwk-butterfly`
-
-Once on the right branch, do:
-
-```sh
-make clean install
-sudo make install
-```
-
-This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`. `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets...). `lotus-miner` will use `$HOME/.lotusminer` respectively. See the *environment variables* section below for how to customize these.
-
-> Remember to [move your Lotus folder](en+update) if you are switching between different networks, or there has been a network reset.
-
-
-### Native Filecoin FFI
-
-Some newer processors (AMD Zen (and later), Intel Ice Lake) have support for SHA extensions. To make full use of your processor's capabilities, make sure you set the following variables BEFORE building from source (as described above):
-
-```sh
-export RUSTFLAGS="-C target-cpu=native -g"
-export FFI_BUILD_FROM_SOURCE=1
-```
-
-> __NOTE__: This method of building does not produce portable binaries! Make sure you run the binary on the same machine as you built it.
-
-### systemd service files
-
-Lotus provides Systemd service files.
They can be installed with: - -```sh -make install-daemon-service -make install-miner-service -``` - -After that, you should be able to control Lotus using `systemctl`. - -## Troubleshooting - -This section mentions some of the common pitfalls for building Lotus. Check the [getting started](en+getting-started) section for more tips on issues when running the lotus daemon. - -### Build errors - -Please check the build logs closely. If you have a dirty state in your git branch make sure to: - -```sh -git checkout -git reset origin/ --hard -make clean -``` - -### Slow builds from China - -Users from China can speed up their builds by setting: - -```sh -export GOPROXY=https://goproxy.cn -``` diff --git a/documentation/en/installation/install-macos.md b/documentation/en/installation/install-macos.md deleted file mode 100644 index ea9ecb8ca..000000000 --- a/documentation/en/installation/install-macos.md +++ /dev/null @@ -1,62 +0,0 @@ -# MacOS Instructions - -## Get XCode Command Line Tools - -To check if you already have the XCode Command Line Tools installed via the CLI, run: - -```sh -xcode-select -p -``` - -If this command returns a path, you can move on to the next step. Otherwise, to install via the CLI, run: - -```sh -xcode-select --install -``` - -To update, run: - -```sh -sudo rm -rf /Library/Developer/CommandLineTools -xcode-select --install -``` - -## Get HomeBrew - -We recommend that MacOS users use [HomeBrew](https://brew.sh) to install each the necessary packages. - -Check if you have HomeBrew: - -```sh -brew -v -``` - -This command returns a version number if you have HomeBrew installed and nothing otherwise. - -In your terminal, enter this command to install Homebrew: - -```sh -/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -``` - -Use the command `brew install` to install the following packages: - -```sh -brew install go bzr jq pkg-config rustup -``` - -Clone - -```sh -git clone https://github.com/filecoin-project/lotus.git -cd lotus/ -``` - -Build - -```sh -make clean && make all -sudo make install -``` - -After intalling Lotus you will be ready to [setup and run the daemon](en+setup.md). diff --git a/documentation/en/installation/installation.md b/documentation/en/installation/installation.md deleted file mode 100644 index 98534da92..000000000 --- a/documentation/en/installation/installation.md +++ /dev/null @@ -1,39 +0,0 @@ -# Installation - -Lotus can be installed in [Linux](en-install-linux) and [MacOS](en-install-macos) machines by building it from source. Windows is not supported yet. - -This section contains guides to install Lotus in the supported platforms. - -Lotus is made of 3 binaries: - -* `lotus`: the main [Lotus node](en+setup) (Lotus client) -* `lotus-miner`: an application specifically for [Filecoin mining](en+miner-setup) -* `lotus-worker`: an additional [application to offload some heavy-processing tasks](en+lotus-worker) from the Lotus Miner. - -These applications are written in Go, but also import several Rust libraries. Lotus does not distribute -pre-compiled builds. - -## Hardware requirements - -### For client nodes - -* 8GiB of RAM -* Recommended for syncing speed: CPU with support for *Intel SHA Extensions* (AMD since Zen microarchitecture, Intel since Ice Lake). 
-* Recommended for speed: SSD hard drive (the bigger the better) - -### For miners - -The following correspond to the latest testing configuration: - -* 2 TB of hard drive space -* 8 core CPU -* 128 GiB of RAM with 256 GiB of NVMe SSD storage for swap (or simply, more RAM). -* Recommended for speed: CPU with support for *Intel SHA Extensions* (AMD since Zen microarchitecture, Intel since Ice Lake). -* GPU for block mining. The following have been [confirmed to be fast enough](en+gpus): - -- GeForce RTX 2080 Ti -- GeForce RTX 2080 SUPER -- GeForce RTX 2080 -- GeForce GTX 1080 Ti -- GeForce GTX 1080 -- GeForce GTX 1060 diff --git a/documentation/en/installation/update.md b/documentation/en/installation/update.md deleted file mode 100644 index 5d76592c9..000000000 --- a/documentation/en/installation/update.md +++ /dev/null @@ -1,72 +0,0 @@ -# Updating and restarting Lotus - -Updating Lotus is as simple as rebuilding and re-installing the software as explained in the previous sections. - -You can verify which version of Lotus you are running with: - -```sh -lotus version -``` - -Make sure that you `git pull` the branch that corresponds to the network that your Lotus daemon is using: - -```sh -git pull origin -make clean -make all -sudo make install # if necessary -``` - -Finally, restart the Lotus Node and/or Lotus Miner(s). - -__CAVEAT__: If you are running miners: check if your miner is safe to shut down and restart: `lotus-miner proving info`. If any deadline shows a block height in the past, do not restart: - -In the following example, Deadline Open is 454 which is earlier than Current Epoch of 500. This miner should **not** be shut down or restarted. - -``` -$ sudo lotus-miner proving info -Miner: t01001 -Current Epoch: 500 -Proving Period Boundary: 154 -Proving Period Start: 154 (2h53m0s ago) -Next Period Start: 3034 (in 21h7m0s) -Faults: 768 (100.00%) -Recovering: 768 -Deadline Index: 5 -Deadline Sectors: 0 -Deadline Open: 454 (23m0s ago) -Deadline Close: 514 (in 7m0s) -Deadline Challenge: 434 (33m0s ago) -Deadline FaultCutoff: 384 (58m0s ago) -``` - -In this next example, the miner can be safely restarted because no Deadlines are earlier than Current Epoch of 497. You have ~45 minutes before the miner must be back online to declare faults (FaultCutoff). If the miner has no faults, you have about an hour. - -``` -$ sudo lotus-miner proving info -Miner: t01000 -Current Epoch: 497 -Proving Period Boundary: 658 -Proving Period Start: 658 (in 1h20m30s) -Next Period Start: 3538 (in 25h20m30s) -Faults: 0 (0.00%) -Recovering: 0 -Deadline Index: 0 -Deadline Sectors: 768 -Deadline Open: 658 (in 1h20m30s) -Deadline Close: 718 (in 1h50m30s) -Deadline Challenge: 638 (in 1h10m30s) -Deadline FaultCutoff: 588 (in 45m30s) -``` - -## Switching networks and network resets - -If you wish to switch to a different lotus network or there has been a network reset, you will need to: - -* Checkout the appropiate repository branch and rebuild -* Ensure you do not mix Lotus data (`LOTUS_PATH`, usually `~/.lotus`) from a previous or different network. For this, either: - * Rename the folder to something else or, - * Set a different `LOTUS_PATH` for the new network. -* Same for `~/.lotusminer` if you are running a miner. - -Note that deleting the Lotus data folder will wipe all the chain data, wallets and configuration, so think twice before taking any non-reversible action. 
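For example, a minimal way to park the old data instead of deleting it (the folder names below are purely illustrative) could be:

```sh
# Keep the old network's data around under a different name...
mv ~/.lotus ~/.lotus-old-network            # hypothetical backup name
mv ~/.lotusminer ~/.lotusminer-old-network  # only if you run a miner
# ...or point the new build at a fresh location instead:
export LOTUS_PATH=~/.lotus-new-network      # hypothetical path
```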
diff --git a/documentation/en/mining/gpus.md b/documentation/en/mining/gpus.md deleted file mode 100644 index ad0ed4f66..000000000 --- a/documentation/en/mining/gpus.md +++ /dev/null @@ -1,17 +0,0 @@ -# Benchmarking additional GPUs - -If you want to test a GPU that is not explicitly supported, set the following *environment variable*: - -```sh -BELLMAN_CUSTOM_GPU=":" -``` - -Here is an example of trying a GeForce GTX 1660 Ti with 1536 cores. - -```sh -BELLMAN_CUSTOM_GPU="GeForce GTX 1660 Ti:1536" -``` - -To get the number of cores for your GPU, you will need to check your card’s specifications. - -To perform the benchmark you can use Lotus' [benchmarking tool](https://github.com/filecoin-project/lotus/tree/master/cmd/lotus-bench). Results and discussion are tracked in a [GitHub issue thread](https://github.com/filecoin-project/lotus/issues/694). diff --git a/documentation/en/mining/lotus-seal-worker.md b/documentation/en/mining/lotus-seal-worker.md deleted file mode 100644 index 47e201ca5..000000000 --- a/documentation/en/mining/lotus-seal-worker.md +++ /dev/null @@ -1,99 +0,0 @@ -# Lotus Worker - -The **Lotus Worker** is an extra process that can offload heavy processing tasks from your **Lotus Miner**. The sealing process automatically runs in the **Lotus Miner** process, but you can use the Worker on another machine communicating over a fast network to free up resources on the machine running the mining process. - -## Installation - -The `lotus-worker` application is installed along with the others when running `sudo make install` as shown in the [Installation section](en+install-linux). For simplicity, we recommend following the same procedure in the machines that will run the Lotus Workers (even if the Lotus miner and the Lotus daemon are not used there). - -## Setting up the Miner - -### Allow external connections to the miner API - -First, you will need to ensure your `lotus-miner`'s API is accessible over the network. - -To do this, open up `~/.lotusminer/config.toml` (Or if you manually set `LOTUS_MINER_PATH`, look under that directory) and look for the API field. - -Default config: - -```toml -[API] -ListenAddress = "/ip4/127.0.0.1/tcp/2345/http" -RemoteListenAddress = "127.0.0.1:2345" -``` - -To make your node accessible over the local area network, you will need to determine your machine's IP on the LAN (`ip a`), and change the `127.0.0.1` in the file to that address. - -A more permissive and less secure option is to change it to `0.0.0.0`. This will allow anyone who can connect to your computer on that port to access the miner's API, though they will still need an auth token. - -`RemoteListenAddress` must be set to an address which other nodes on your network will be able to reach. - -### Create an authentication token - -Write down the output of: - -```sh -lotus-miner auth api-info --perm admin -``` - -The Lotus Workers will need this token to connect to the miner. - -## Connecting the Lotus Workers - -On each machine that will run the `lotus-worker` application you will need to define the following *environment variable*: - -```sh -export MINER_API_INFO::/ip4//tcp/2345` -``` - -If you are trying to use `lotus-worker` from China. 
You should additionally set:
-
-```sh
-export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
-```
-
-
-Once that is done, you can run the Worker with:
-
-```sh
-lotus-worker run
-```
-
-> If you are running multiple workers on the same host, you will need to specify the `--listen` flag and ensure each worker is on a different port.
-
-On your Lotus miner, check that the workers are correctly connected:
-
-```sh
-lotus-miner sealing workers
-Worker 0, host computer
- CPU: [ ] 0 core(s) in use
- RAM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
- VMEM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
- GPU: GeForce RTX 2080, not used
-
-Worker 1, host othercomputer
- CPU: [ ] 0 core(s) in use
- RAM: [|||||||||||||| ] 23% 14 GiB/62.7 GiB
- VMEM: [|||||||||||||| ] 23% 14 GiB/62.7 GiB
- GPU: GeForce RTX 2080, not used
-```
-
-## Running locally for manually managing process priority
-
-You can also run the **Lotus Worker** on the same machine as your **Lotus Miner**, so you can manually manage the process priority.
-
-To do so you have to first __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes:
-
-```toml
-[Storage]
- AllowPreCommit1 = false
- AllowPreCommit2 = false
- AllowCommit = false
- AllowUnseal = false
-```
-
-You can then run the worker, connecting to the miner's API over the local loopback interface:
-
-```sh
-lotus-worker run
-```
diff --git a/documentation/en/mining/managing-deals.md b/documentation/en/mining/managing-deals.md
deleted file mode 100644
index 5f73a6a2d..000000000
--- a/documentation/en/mining/managing-deals.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Managing deals
-
-
-While the Lotus Miner is running as a daemon, the `lotus-miner` application can be used to manage and configure the miner:
-
-
-```sh
-lotus-miner storage-deals --help
-```
-
-Running the above command will show the different options related to deals. For example, `lotus-miner storage-deals set-ask` allows you to set the price for storage that your miner uses to respond to ask requests from clients.
-
-If deals are ongoing, you can check the data transfers with:
-
-```sh
-lotus-miner data-transfers list
-```
-
-Make sure you explore the `lotus-miner` CLI. Every command is self-documented and takes a `--help` flag that offers specific information about it.
diff --git a/documentation/en/mining/miner-setup.md b/documentation/en/mining/miner-setup.md
deleted file mode 100644
index cafa1e7b1..000000000
--- a/documentation/en/mining/miner-setup.md
+++ /dev/null
@@ -1,241 +0,0 @@
-# Miner setup
-
-This page will guide you through all you need to know to successfully run a **Lotus Miner**. Before proceeding, remember that you should be running the Lotus daemon on a fully synced chain.
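One quick way to confirm this before continuing, using the commands covered in the setup section (a sketch, assuming the daemon is already running on the same machine):

```sh
# Block until the daemon has caught up with the chain head.
lotus sync wait
# Then double-check the sync state and that the node sees peers.
lotus sync status
lotus net peers | wc -l
```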
- -## Performance tweaks - -This is a list of performance tweaks to consider before starting the miner: - -### Building - -As [explained already](en+install-linux#native-filecoin-ffi-10) should have exported the following variables before building the Lotus applications: - -```sh -export RUSTFLAGS="-C target-cpu=native -g" -export FFI_BUILD_FROM_SOURCE=1 -``` - -### Environment - -For high performance mining, we recommend setting the following variables in your environment so that they are available when running any of the Lotus applications: - -```sh -# See https://github.com/filecoin-project/bellman -export BELLMAN_CPU_UTILIZATION=0.875 - -# See https://github.com/filecoin-project/rust-fil-proofs/ -export FIL_PROOFS_MAXIMIZE_CACHING=1 # More speed at RAM cost (1x sector-size of RAM - 32 GB). -export FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1 # precommit2 GPU acceleration -export FIL_PROOFS_USE_GPU_TREE_BUILDER=1 -``` - -IF YOU ARE RUNNING FROM CHINA: - -```sh -export IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/" -``` - -IF YOUR MINER RUNS IN A DIFFERENT MACHINE AS THE LOTUS DAEMON: - -```sh -export FULLNODE_API_INFO=:/ip4//tcp//http -``` - -If you will be using systemd service files to run the Lotus daemon and miner, make sure you include these variables manually in the service files. - -### Adding swap - -If you have only 128GiB of RAM, you will need to make sure your system provides at least an extra 256GiB of fast swap (preferably NVMe SSD): - -```sh -sudo fallocate -l 256G /swapfile -sudo chmod 600 /swapfile -sudo mkswap /swapfile -sudo swapon /swapfile -# show current swap spaces and take note of the current highest priority -swapon --show -# append the following line to /etc/fstab (ensure highest priority) and then reboot -# /swapfile swap swap pri=50 0 0 -sudo reboot -# check a 256GB swap file is automatically mounted and has the highest priority -swapon --show -``` - -## Creating a new BLS wallet - -You will need a BLS wallet (`t3...`) for mining. To create it, if you don't have one already, run: - -```sh -lotus wallet new bls -``` - -Next make sure to [send some funds](en+wallet) to this address so that the miner setup can be completed. - -## Initializing the miner - -> SPACE RACE: -> To participate in the Space race, please register your miner: -> -> - Visit the [faucet](http://spacerace.faucet.glif.io/) -> - Paste the address you created under REQUEST. -> - Press the Request button. - -Now that you have a miner address you can initialize the Lotus Miner: - -```sh -lotus-miner init --owner= --no-local-storage -``` - -* The `--no-local-storage` flag is used so that we configure specific locations for storage later below. -* The init process will download over 100GiB of initialization parameters to /var/tmp/filecoin-proof-parameters. Make sure there is space or set `FIL_PROOFS_PARAMETER_CACHE` to somewhere else. -* The Lotus Miner configuration folder is created at `~/.lotusminer/` or `$LOTUS_MINER_PATH` if set. - -## Reachability - -Before you start your miner, it is __very important__ to configure it so that it is reachable from any peer in the Filecoin network. For this you will need a stable public IP and edit your `~/.lotusminer/config.toml` as follows: - -```toml -... -[Libp2p] - ListenAddresses = ["/ip4/0.0.0.0/tcp/24001"] # choose a fixed port - AnnounceAddresses = ["/ip4//tcp/24001"] # important! -... -``` - -Once you start your miner, make sure you can connect to its public IP/port (you can use `telnet`, `nc` for the task...). 
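For example, from a machine outside your network (the IP below is only a placeholder for your miner's public IP; 24001 is the example port used in the config snippet above):

```sh
# Should report a successful connection if the miner's libp2p port is reachable.
nc -vz 203.0.113.10 24001
```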
If you have an active firewall of some sort, you may need to additionally open ports in it.
-
-
-## Starting the miner
-
-You are now ready to start your Lotus miner:
-
-```sh
-lotus-miner run
-```
-
-or if you are using the systemd service file:
-
-```sh
-systemctl start lotus-miner
-```
-
-> __Do not proceed__ from here until you have verified that your miner is not only running, but also __reachable on its public IP address__.
-
-## Publishing the miner addresses
-
-Once the miner is up and running, publish your miner address (which you configured above) on the chain (please ensure it is dialable):
-
-```sh
-lotus-miner actor set-addrs /ip4//tcp/24001
-```
-
-## Setting locations for sealing and long-term storage
-
-If you used the `--no-local-storage` flag during initialization, you can now specify the disk locations for sealing (SSD recommended) and long-term storage (otherwise you can skip this):
-
-```
-lotus-miner storage attach --init --seal
-lotus-miner storage attach --init --store
-lotus-miner storage list
-```
-
-## Pledging sectors
-
-If you would like to compete for block rewards by increasing your power in the network as soon as possible, you can optionally pledge one or several sectors, depending on your storage. It can also be used to test that the sealing process works correctly. Pledging is equivalent to storing random data instead of real data obtained through storage deals.
-
-> Note that pledging sectors to the mainnet network makes the most sense when trying to obtain a reasonable amount of total power in the network, thus obtaining real chances to mine new blocks. Otherwise it is only useful for testing purposes.
-
-If you decide to go ahead, then do:
-
-```sh
-lotus-miner sectors pledge
-```
-
-This will write data to `$TMPDIR` so make sure that there is enough space available.
-
-You should check that your sealing job has started with:
-
-```sh
-lotus-miner sealing jobs
-```
-
-This will be accompanied by a file in `/unsealed`.
-
-After some minutes, you can check the sealing progress with:
-
-```sh
-lotus-miner sectors list
-# and
-lotus-miner sealing workers
-```
-
-When sealing for the new sector is complete, `pSet: NO` will become `pSet: YES`.
-
-Once the sealing is finished, you will want to configure how long it took your miner to seal this sector and configure the miner accordingly. To find out how long it took, use:
-
-```
-lotus-miner sectors status --log 0
-```
-
-Once you know, you can edit the Miner's `~/.lotusminer/config.toml` accordingly:
-
-```
-...
-[Dealmaking]
-...
- ExpectedSealDuration = "12h0m0s" # The time it took your miner
-```
-
-You can also take the chance to edit other values, such as `WaitForDealsDelay`, which specifies the delay between accepting the first deal and sealing, allowing you to place multiple deals in the same sector.
-
-Once you are done editing the configuration, [restart your miner](en+update).
-
-If you wish to be able to re-use a pledged sector for real storage deals before the pledged period of 6 months ends, you will need to mark them for upgrade:
-
-```sh
-lotus-miner sectors mark-for-upgrade
-```
-
-The sector should become inactive within 24 hours. From that point, the pledged storage can be re-used to store real data associated with real storage deals.
-
-## Separate address for windowPoSt messages
-
-WindowPoSt is the mechanism through which storage is verified in Filecoin. It requires miners to submit proofs for all sectors every 24h, which requires sending messages to the chain.
- -Because many other mining related actions require sending messages to the chain, and not all of those are "high value", it may be desirable to use a separate account to send PoSt messages from. This allows for setting lower GasFeeCaps on the lower value messages without creating head-of-line blocking problems for the PoSt messages in congested chain conditions - -To set this up, first create a new account, and send it some funds for gas fees: - -```sh -lotus wallet new bls -t3defg... - -lotus send t3defg... 100 -``` - -Next add the control address: - -```sh -lotus-miner actor control set --really-do-it t3defg... -Add t3defg... -Message CID: bafy2.. -``` - -Wait for the message to land on chain: - -```sh -lotus state wait-msg bafy2.. -... -Exit Code: 0 -... -``` - -Finally, check the miner control address list to make sure the address was correctly setup: - -```sh -lotus-miner actor control list -name ID key use balance -owner t01111 t3abcd... other 300 FIL -worker t01111 t3abcd... other 300 FIL -control-0 t02222 t3defg... post 100 FIL -``` diff --git a/documentation/en/mining/mining-troubleshooting.md b/documentation/en/mining/mining-troubleshooting.md deleted file mode 100644 index a9972c2bd..000000000 --- a/documentation/en/mining/mining-troubleshooting.md +++ /dev/null @@ -1,59 +0,0 @@ -# Mining Troubleshooting - -## Config: Filecoin Proof Parameters directory - -If you want to put the **Filecoin Proof Parameters** in a different directory, use the following environment variable: - -```sh -FIL_PROOFS_PARAMETER_CACHE -``` - -## Error: Can't acquire bellman.lock - -The **Bellman** lockfile is created to lock a GPU for a process. This bug can occur when this file isn't properly cleaned up: - -```sh -mining block failed: computing election proof: github.com/filecoin-project/lotus/miner.(*Miner).mineOne -``` - -This bug occurs when the miner can't acquire the `bellman.lock`. To fix it you need to stop the `lotus-miner` and remove `/tmp/bellman.lock`. - -## Error: Failed to get api endpoint - -```sh -lotus-miner info -# WARN main lotus-miner/main.go:73 failed to get api endpoint: (/Users/myrmidon/.lotusminer) %!w(*errors.errorString=&{API not running (no endpoint)}): -``` - -If you see this, that means your **Lotus Miner** isn't ready yet. You need to finish [syncing the chain](en+setup#waiting-to-sync-370). - -## Error: Your computer may not be fast enough - -```sh -CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up -``` - -If you see this, that means your computer is too slow and your blocks are not included in the chain, and you will not receive any rewards. - -## Error: No space left on device - -```sh -lotus-miner sectors pledge -# No space left on device (os error 28) -``` - -If you see this, that means `pledge-sector` wrote too much data to `$TMPDIR` which by default is the root partition (This is common for Linux setups). Usually your root partition does not get the largest partition of storage so you will need to change the environment variable to something else. - -## Error: GPU unused - -If you suspect that your GPU is not being used, first make sure it is properly configured as described in the [testing configuration page](hardware-mining.md). Once you've done that (and set the `BELLMAN_CUSTOM_GPU` as appropriate if necessary) you can verify your GPU is being used by running a quick lotus-bench benchmark. 
- -First, to watch GPU utilization run `nvtop` in one terminal, then in a separate terminal, run: - -```sh -make bench -./bench sealing --sector-size=2KiB -``` - -This process uses a fair amount of GPU, and generally takes ~4 minutes to complete. If you do not see any activity in nvtop from lotus during the entire process, it is likely something is misconfigured with your GPU. - diff --git a/documentation/en/mining/mining.md b/documentation/en/mining/mining.md deleted file mode 100644 index b1b944c6e..000000000 --- a/documentation/en/mining/mining.md +++ /dev/null @@ -1,8 +0,0 @@ -# Storage Mining - -This section of the documentation explains how to do storage mining with Lotus. Please note that not everyone can do storage mining, and that you should not attempt it on on networks where sector sizes are 32GB+ unless you meet the [hardware requirements](en+install#hardware-requirements-1). - -From this point we assume that you have setup and are running the [Lotus Node](en+setup), that it has fully synced the Filecoin chain and that you are familiar with how to interact with it using the `lotus` command-line interface. - -In order to perform storage mining, apart from the Lotus daemon, you will be additionally interacting with the `lotus-miner` and potentially the `lotus-worker` applications (which you should have [installed](en+install-linux) along the `lotus` application already). - diff --git a/documentation/en/store/adding-from-ipfs.md b/documentation/en/store/adding-from-ipfs.md deleted file mode 100644 index 2f6b097cc..000000000 --- a/documentation/en/store/adding-from-ipfs.md +++ /dev/null @@ -1,20 +0,0 @@ -# Adding data from IPFS - -Lotus supports making deals with data stored in IPFS, without having to re-import it into lotus. - -To enable this integration, you need to have an IPFS daemon running in the background. - -Then, open up `~/.lotus/config.toml` (or if you manually set `LOTUS_PATH`, look under that directory) and look for the Client field, and set `UseIpfs` to `true`. - -```toml -[Client] -UseIpfs = true -``` - -After restarting the lotus daemon, you should be able to make deals with data in your IPFS node: - -```sh -$ ipfs add -r SomeData -QmSomeData -$ ./lotus client deal QmSomeData t01000 0.0000000001 80000 -``` diff --git a/documentation/en/store/making-deals.md b/documentation/en/store/making-deals.md deleted file mode 100644 index ca3a47182..000000000 --- a/documentation/en/store/making-deals.md +++ /dev/null @@ -1,71 +0,0 @@ -# Making storage deals - -## Adding a file to Lotus - -Before sending data to a Filecoin miner for storage, the data needs to be correctly formatted and packed. This can be achieved by locally importing the data into Lotus with: - -```sh -lotus client import ./your-example-file.txt -``` - -Upon success, this command will return a **Data CID**. This is a very important piece of information, as it will be used to make deals to both store and retrieve the data in the future. - -You can list the data CIDs of the files you locally imported with: - -```sh -lotus client local -``` - -## Storing data in the network - -To store data in the network you will need to: - -* Find a Filecoin miner willing to store it -* Make a deal with the miner agreeing on the price to pay and the duration for which the data should be stored. - -You can obtain a list of all miners in the network with: - -```sh -lotus state list-miners -t0xxxx -t0xxxy -t0xxxz -... -``` - -This will print a list of miner IDs. 
In order to ask for the terms offered by a particular miner, you can then run:
-
-```sh
-lotus client query-ask
-```
-
-If you are satisfied with the terms, you can proceed to propose a deal to the miner, using the **Data CID** that you obtained during the import step:
-
-
-```sh
-lotus client deal
-```
-
-This command will interactively ask you for the CID, miner ID and duration in days for the deal. You can also call it with arguments:
-
-```sh
-lotus client deal
-```
-
-where the `duration` is expressed in blocks (1 block is equivalent to 30s).
-
-## Checking the status of the deals
-
-You can list deals with:
-
-```sh
-lotus client list-deals
-```
-
-Among other things, this will give you information about the current state of your deals, whether they have been published on chain (by the miners) and whether the miners have been slashed for not honoring them.
-
-For a deal to succeed, the miner needs to be correctly configured and running, accept the deal and *seal* the file correctly. Otherwise, the deal will appear in an error state.
-
-You can make deals with multiple miners for the same data.
-
-Once a deal is successful and the data is *sealed*, it can be [retrieved](en+retrieving).
diff --git a/documentation/en/store/retrieve.md b/documentation/en/store/retrieve.md
deleted file mode 100644
index 1e8db65af..000000000
--- a/documentation/en/store/retrieve.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Retrieving Data
-
-Once data has been successfully [stored](en+making-deals) and sealed by a Filecoin miner, it can be retrieved.
-
-In order to do this we will need to create a **retrieval deal**.
-
-## Finding data by CID
-
-In order to retrieve some data you will need the **Data CID** that was used to create the storage deal.
-
-You can find who is storing the data by running:
-
-```sh
-lotus client find
-```
-
-## Making a retrieval deal
-
-You can then make a retrieval deal with:
-
-```sh
-lotus client retrieve
-```
-
-This command takes other optional flags (check `--help`).
-
-If the outfile does not exist, it will be created in the Lotus repository directory. This process may take 2 to 10 minutes.
diff --git a/documentation/en/store/storage-troubleshooting.md b/documentation/en/store/storage-troubleshooting.md
deleted file mode 100644
index 7087ec3d0..000000000
--- a/documentation/en/store/storage-troubleshooting.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Storage Troubleshooting
-
-## Error: Routing: not found
-
-```
-WARN main lotus/main.go:72 routing: not found
-```
-
-This error means that the miner is offline.
-
-## Error: Failed to start deal
-
-```sh
-WARN main lotus/main.go:72 failed to start deal: computing commP failed: generating CommP: Piece must be at least 127 bytes
-```
-
-This error means that there is a minimum file size of 127 bytes.
-
-## Error: 0kb file response during retrieval
-
-This means that the file to be retrieved may have not yet been sealed and is thus not retrievable yet.
-
-Miners can check sealing progress with this command:
-
-```sh
-lotus-miner sectors list
-```
-
-When sealing is complete, `pSet: NO` will become `pSet: YES`.
-
diff --git a/documentation/en/store/store.md b/documentation/en/store/store.md
deleted file mode 100644
index 205bd0e23..000000000
--- a/documentation/en/store/store.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Storing and retrieving data
-
-Lotus enables you to store any data on the Filecoin network and retrieve it later. This is achieved by making *deals* with miners.
- -A *storage deal* specifies that a miner should store ceratin data for a previously agreed period and price. - -Once a deal is made, the data is then sent to the miners, which regularly proves that it is storing it. If they fail to do so, the miner is penalized (slashed). - -The data can be retrieved with a *retrieval deal*. - -This section explains how to use Lotus to [store](en+making-deals) and [retrieve](en+retrieving) data from the Filecoin network. From 591e32af487204f58d092015a8ce25439b884718 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 17 Sep 2020 20:41:46 +0200 Subject: [PATCH 09/88] Remove circle_ci docs-check. --- .circleci/config.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index acd447f69..d8f149889 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -346,15 +346,6 @@ jobs: - run: git --no-pager diff - run: git --no-pager diff --quiet - docs-check: - executor: golang - steps: - - install-deps - - prepare - - run: make docsgen - - run: git --no-pager diff - - run: git --no-pager diff --quiet - lint: &lint description: | Run golangci-lint. @@ -424,7 +415,6 @@ workflows: - mod-tidy-check - gofmt - cbor-gen-check - - docs-check - test: codecov-upload: true test-suite-name: full From bc9232544c7f5d68c737b9d7887ae05f0232ff34 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 17 Sep 2020 20:48:09 +0200 Subject: [PATCH 10/88] Remove faqs too --- documentation/en/faqs.md | 132 --------------------------------------- 1 file changed, 132 deletions(-) delete mode 100644 documentation/en/faqs.md diff --git a/documentation/en/faqs.md b/documentation/en/faqs.md deleted file mode 100644 index 74119a5b6..000000000 --- a/documentation/en/faqs.md +++ /dev/null @@ -1,132 +0,0 @@ -# Frequently Asked Questions - -Here are some FAQs concerning the Lotus implementation and participation in -Testnet. -For questions concerning the broader Filecoin project, please -go [here](https://filecoin.io/faqs/). - -## Introduction to Lotus - -### What is Lotus? - -Lotus is an implementation of the **Filecoin Distributed Storage Network**, written in Go. -It is designed to be modular and interoperable with any other implementation of the Filecoin Protocol. - -### What are the components of Lotus? - -Lotus is composed of two separate pieces that can talk to each other: - -The Lotus Node can sync the blockchain, validating all blocks, transfers, and deals -along the way. It can also facilitate the creation of new storage deals. If you are not -interested in providing your own storage to the network, and do not want to produce blocks -yourself, then the Lotus Node is all you need! - -The Lotus Miner does everything you need for the registration of storage, and the -production of new blocks. The Lotus Miner communicates with the network by talking -to a Lotus Node over the JSON-RPC API. - -## Setting up a Lotus Node - -### How do I set up a Lotus Node? - -Follow the instructions found [here](en+install) and [here](en+setup). - -### Where can I get the latest version of Lotus? - -Download the binary tagged as the `Latest Release` from the [Lotus Github repo](https://github.com/filecoin-project/lotus/releases) or checkout the `master` branch of the source repository. - -### What operating systems can Lotus run on? - -Lotus can build and run on most Linux and MacOS systems with [at least 8GB of RAM](en+install#hardware-requirements-1). Windows is not yet supported. - -### How can I update to the latest version of Lotus? 
- -To update Lotus, follow the instructions [here](en+update). - -### How do I prepare a fresh installation of Lotus? - -Stop the Lotus daemon, and delete all related files, including sealed and chain data by -running `rm ~/.lotus ~/.lotusminer`. - -Then, install Lotus afresh by following the instructions -found [here](en+install). - -### Can I configure where the node's config and data goes? - -Yes! The `LOTUS_PATH` variable sets the path for where the Lotus node's data is written. -The `LOTUS_MINER_PATH` variable does the same for miner-specific information. - -## Interacting with a Lotus Node - -### How can I communicate with a Lotus Node? - -Lotus Nodes have a command-line interface, as well as a JSON-RPC API. - -### What are the commands I can send using the command-line interface? - -The command-line interface is self-documenting, try running `lotus --help` from the `lotus` home -directory for more. - -### How can I send a request over the JSON-RPC API? - -Information on how to send a `cURL` request to the JSON-RPC API can be found -[here](en+api). - -### What are the requests I can send over the JSON-RPC API? - -Please have a look [here](en+api). - - -## The Test Network - -### What is Testnet? - -Testnet is a live network of Lotus Nodes run by the -community for testing purposes. - -### Is FIL on the Testnet worth anything? - -Nothing at all! - -### How can I see the status of Testnet? - -The [dashboard](https://stats.testnet.filecoin.io/) displays the status of the network as -well as a ton of other metrics you might find interesting. - -## Mining with a Lotus Node on Testnet - -### How do I get started mining with Lotus? - -Follow the instructions found [here](en+mining). - -### What are the minimum hardware requirements? - -An example test configuration, and minimum hardware requirements can be found -[here](en+install#hardware-requirements-8). - -Note that these might NOT be the minimum requirements for mining on Mainnet. - -### What are some GPUs that have been tested? - -See previous question. - -### Why is my GPU not being used when sealing a sector? - -Sealing a sector does not involve constant GPU operations. It's possible -that your GPU simply isn't necessary at the moment you checked. - -## Advanced questions - -### Is there a Docker image for lotus? - -Community-contributed Docker and Docker Compose examples are available -[here](https://github.com/filecoin-project/lotus/tree/master/tools/dockers/docker-examples). - -### How can I run two miners on the same machine? - -You can do so by changing the storage path variable for the second miner, e.g., -`LOTUS_MINER_PATH=~/.lotusminer2`. You will also need to make sure that no ports collide. - -### How do I setup my own local devnet? - -Follow the instructions found [here](en+local-devnet). 
From 784399738a49f282bc8002771e080d2eba5a33f9 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 17 Sep 2020 20:50:37 +0200 Subject: [PATCH 11/88] Keep api-methods.md even if not part of the docs --- Makefile | 2 +- documentation/en/api-methods.md | 4567 +++++++++++++++++++++++++++++++ 2 files changed, 4568 insertions(+), 1 deletion(-) create mode 100644 documentation/en/api-methods.md diff --git a/Makefile b/Makefile index 245d8a9a0..56ab361ec 100644 --- a/Makefile +++ b/Makefile @@ -286,7 +286,7 @@ method-gen: gen: type-gen method-gen docsgen: - go run ./api/docgen > documentation/en/building/api-methods.md + go run ./api/docgen > documentation/en/api-methods.md print-%: @echo $*=$($*) diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md new file mode 100644 index 000000000..2f3164bb7 --- /dev/null +++ b/documentation/en/api-methods.md @@ -0,0 +1,4567 @@ +# Groups +* [](#) + * [Closing](#Closing) + * [Shutdown](#Shutdown) + * [Version](#Version) +* [Auth](#Auth) + * [AuthNew](#AuthNew) + * [AuthVerify](#AuthVerify) +* [Beacon](#Beacon) + * [BeaconGetEntry](#BeaconGetEntry) +* [Chain](#Chain) + * [ChainExport](#ChainExport) + * [ChainGetBlock](#ChainGetBlock) + * [ChainGetBlockMessages](#ChainGetBlockMessages) + * [ChainGetGenesis](#ChainGetGenesis) + * [ChainGetMessage](#ChainGetMessage) + * [ChainGetNode](#ChainGetNode) + * [ChainGetParentMessages](#ChainGetParentMessages) + * [ChainGetParentReceipts](#ChainGetParentReceipts) + * [ChainGetPath](#ChainGetPath) + * [ChainGetRandomnessFromBeacon](#ChainGetRandomnessFromBeacon) + * [ChainGetRandomnessFromTickets](#ChainGetRandomnessFromTickets) + * [ChainGetTipSet](#ChainGetTipSet) + * [ChainGetTipSetByHeight](#ChainGetTipSetByHeight) + * [ChainHasObj](#ChainHasObj) + * [ChainHead](#ChainHead) + * [ChainNotify](#ChainNotify) + * [ChainReadObj](#ChainReadObj) + * [ChainSetHead](#ChainSetHead) + * [ChainStatObj](#ChainStatObj) + * [ChainTipSetWeight](#ChainTipSetWeight) +* [Client](#Client) + * [ClientCalcCommP](#ClientCalcCommP) + * [ClientDataTransferUpdates](#ClientDataTransferUpdates) + * [ClientDealSize](#ClientDealSize) + * [ClientFindData](#ClientFindData) + * [ClientGenCar](#ClientGenCar) + * [ClientGetDealInfo](#ClientGetDealInfo) + * [ClientGetDealUpdates](#ClientGetDealUpdates) + * [ClientHasLocal](#ClientHasLocal) + * [ClientImport](#ClientImport) + * [ClientListDataTransfers](#ClientListDataTransfers) + * [ClientListDeals](#ClientListDeals) + * [ClientListImports](#ClientListImports) + * [ClientMinerQueryOffer](#ClientMinerQueryOffer) + * [ClientQueryAsk](#ClientQueryAsk) + * [ClientRemoveImport](#ClientRemoveImport) + * [ClientRetrieve](#ClientRetrieve) + * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) + * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) + * [ClientStartDeal](#ClientStartDeal) +* [Gas](#Gas) + * [GasEstimateFeeCap](#GasEstimateFeeCap) + * [GasEstimateGasLimit](#GasEstimateGasLimit) + * [GasEstimateGasPremium](#GasEstimateGasPremium) + * [GasEstimateMessageGas](#GasEstimateMessageGas) +* [I](#I) + * [ID](#ID) +* [Log](#Log) + * [LogList](#LogList) + * [LogSetLevel](#LogSetLevel) +* [Market](#Market) + * [MarketEnsureAvailable](#MarketEnsureAvailable) +* [Miner](#Miner) + * [MinerCreateBlock](#MinerCreateBlock) + * [MinerGetBaseInfo](#MinerGetBaseInfo) +* [Mpool](#Mpool) + * [MpoolClear](#MpoolClear) + * [MpoolGetConfig](#MpoolGetConfig) + * [MpoolGetNonce](#MpoolGetNonce) + * [MpoolPending](#MpoolPending) + * [MpoolPush](#MpoolPush) + 
* [MpoolPushMessage](#MpoolPushMessage) + * [MpoolSelect](#MpoolSelect) + * [MpoolSetConfig](#MpoolSetConfig) + * [MpoolSub](#MpoolSub) +* [Msig](#Msig) + * [MsigAddApprove](#MsigAddApprove) + * [MsigAddCancel](#MsigAddCancel) + * [MsigAddPropose](#MsigAddPropose) + * [MsigApprove](#MsigApprove) + * [MsigCancel](#MsigCancel) + * [MsigCreate](#MsigCreate) + * [MsigGetAvailableBalance](#MsigGetAvailableBalance) + * [MsigGetVested](#MsigGetVested) + * [MsigPropose](#MsigPropose) + * [MsigSwapApprove](#MsigSwapApprove) + * [MsigSwapCancel](#MsigSwapCancel) + * [MsigSwapPropose](#MsigSwapPropose) +* [Net](#Net) + * [NetAddrsListen](#NetAddrsListen) + * [NetAgentVersion](#NetAgentVersion) + * [NetAutoNatStatus](#NetAutoNatStatus) + * [NetBandwidthStats](#NetBandwidthStats) + * [NetBandwidthStatsByPeer](#NetBandwidthStatsByPeer) + * [NetBandwidthStatsByProtocol](#NetBandwidthStatsByProtocol) + * [NetConnect](#NetConnect) + * [NetConnectedness](#NetConnectedness) + * [NetDisconnect](#NetDisconnect) + * [NetFindPeer](#NetFindPeer) + * [NetPeers](#NetPeers) + * [NetPubsubScores](#NetPubsubScores) +* [Paych](#Paych) + * [PaychAllocateLane](#PaychAllocateLane) + * [PaychAvailableFunds](#PaychAvailableFunds) + * [PaychAvailableFundsByFromTo](#PaychAvailableFundsByFromTo) + * [PaychCollect](#PaychCollect) + * [PaychGet](#PaychGet) + * [PaychGetWaitReady](#PaychGetWaitReady) + * [PaychList](#PaychList) + * [PaychNewPayment](#PaychNewPayment) + * [PaychSettle](#PaychSettle) + * [PaychStatus](#PaychStatus) + * [PaychVoucherAdd](#PaychVoucherAdd) + * [PaychVoucherCheckSpendable](#PaychVoucherCheckSpendable) + * [PaychVoucherCheckValid](#PaychVoucherCheckValid) + * [PaychVoucherCreate](#PaychVoucherCreate) + * [PaychVoucherList](#PaychVoucherList) + * [PaychVoucherSubmit](#PaychVoucherSubmit) +* [State](#State) + * [StateAccountKey](#StateAccountKey) + * [StateAllMinerFaults](#StateAllMinerFaults) + * [StateCall](#StateCall) + * [StateChangedActors](#StateChangedActors) + * [StateCirculatingSupply](#StateCirculatingSupply) + * [StateCompute](#StateCompute) + * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) + * [StateGetActor](#StateGetActor) + * [StateGetReceipt](#StateGetReceipt) + * [StateListActors](#StateListActors) + * [StateListMessages](#StateListMessages) + * [StateListMiners](#StateListMiners) + * [StateLookupID](#StateLookupID) + * [StateMarketBalance](#StateMarketBalance) + * [StateMarketDeals](#StateMarketDeals) + * [StateMarketParticipants](#StateMarketParticipants) + * [StateMarketStorageDeal](#StateMarketStorageDeal) + * [StateMinerActiveSectors](#StateMinerActiveSectors) + * [StateMinerAvailableBalance](#StateMinerAvailableBalance) + * [StateMinerDeadlines](#StateMinerDeadlines) + * [StateMinerFaults](#StateMinerFaults) + * [StateMinerInfo](#StateMinerInfo) + * [StateMinerInitialPledgeCollateral](#StateMinerInitialPledgeCollateral) + * [StateMinerPartitions](#StateMinerPartitions) + * [StateMinerPower](#StateMinerPower) + * [StateMinerPreCommitDepositForPower](#StateMinerPreCommitDepositForPower) + * [StateMinerProvingDeadline](#StateMinerProvingDeadline) + * [StateMinerRecoveries](#StateMinerRecoveries) + * [StateMinerSectorCount](#StateMinerSectorCount) + * [StateMinerSectors](#StateMinerSectors) + * [StateMsgGasCost](#StateMsgGasCost) + * [StateNetworkName](#StateNetworkName) + * [StateReadState](#StateReadState) + * [StateReplay](#StateReplay) + * [StateSearchMsg](#StateSearchMsg) + * [StateSectorExpiration](#StateSectorExpiration) + * 
[StateSectorGetInfo](#StateSectorGetInfo) + * [StateSectorPartition](#StateSectorPartition) + * [StateSectorPreCommitInfo](#StateSectorPreCommitInfo) + * [StateVerifiedClientStatus](#StateVerifiedClientStatus) + * [StateWaitMsg](#StateWaitMsg) +* [Sync](#Sync) + * [SyncCheckBad](#SyncCheckBad) + * [SyncCheckpoint](#SyncCheckpoint) + * [SyncIncomingBlocks](#SyncIncomingBlocks) + * [SyncMarkBad](#SyncMarkBad) + * [SyncState](#SyncState) + * [SyncSubmitBlock](#SyncSubmitBlock) + * [SyncUnmarkBad](#SyncUnmarkBad) +* [Wallet](#Wallet) + * [WalletBalance](#WalletBalance) + * [WalletDefaultAddress](#WalletDefaultAddress) + * [WalletDelete](#WalletDelete) + * [WalletExport](#WalletExport) + * [WalletHas](#WalletHas) + * [WalletImport](#WalletImport) + * [WalletList](#WalletList) + * [WalletNew](#WalletNew) + * [WalletSetDefault](#WalletSetDefault) + * [WalletSign](#WalletSign) + * [WalletSignMessage](#WalletSignMessage) + * [WalletVerify](#WalletVerify) +## + + +### Closing + + +Perms: read + +Inputs: `null` + +Response: `{}` + +### Shutdown + + +Perms: admin + +Inputs: `null` + +Response: `{}` + +### Version + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Version": "string value", + "APIVersion": 3584, + "BlockDelay": 42 +} +``` + +## Auth + + +### AuthNew + + +Perms: admin + +Inputs: +```json +[ + null +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### AuthVerify + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `null` + +## Beacon +The Beacon method group contains methods for interacting with the random beacon (DRAND) + + +### BeaconGetEntry +BeaconGetEntry returns the beacon entry for the given filecoin epoch. If +the entry has not yet been produced, the call will block until the entry +becomes available + + +Perms: read + +Inputs: +```json +[ + 10101 +] +``` + +Response: +```json +{ + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +## Chain +The Chain method group contains methods for interacting with the +blockchain, but that do not require any form of state computation. + + +### ChainExport +ChainExport returns a stream of bytes with CAR dump of chain data. +The exported chain data includes the header chain from the given tipset +back to genesis, the entire genesis state, and the most recent 'nroots' +state trees. +If oldmsgskip is set, messages from before the requested roots are also not included. + + +Perms: read + +Inputs: +```json +[ + 10101, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainGetBlock +ChainGetBlock returns the block specified by the given CID. 
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Miner": "t01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" +} +``` + +### ChainGetBlockMessages +ChainGetBlockMessages returns messages stored in the specified block. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "BlsMessages": null, + "SecpkMessages": null, + "Cids": null +} +``` + +### ChainGetGenesis +ChainGetGenesis returns the genesis tipset. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetMessage +ChainGetMessage reads a message referenced by the specified CID from the +chain blockstore. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +### ChainGetNode +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Obj": {} +} +``` + +### ChainGetParentMessages +ChainGetParentMessages returns messages stored in parent tipset of the +specified block. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `null` + +### ChainGetParentReceipts +ChainGetParentReceipts returns receipts for messages in parent tipset of +the specified block. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `null` + +### ChainGetPath +ChainGetPath returns a set of revert/apply operations needed to get from +one tipset to another, for example: +``` + to + ^ +from tAA + ^ ^ +tBA tAB + ^---*--^ + ^ + tRR +``` +Would return `[revert(tBA), apply(tAB), apply(tAA)]` + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### ChainGetRandomnessFromBeacon +ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. 
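+
+Byte-array parameters and results in this reference appear as base64 strings (the recurring `"Ynl0ZSBhcnJheQ=="` placeholder simply decodes to `byte array`), which is how Go's `encoding/json` marshals `[]byte`. A small sketch building the entropy argument for this randomness call; the entropy value and epoch are made-up examples.
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// The placeholder used for byte arrays throughout this document.
+	raw, err := base64.StdEncoding.DecodeString("Ynl0ZSBhcnJheQ==")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s\n", raw) // prints: byte array
+
+	// Going the other way, []byte values are base64-encoded automatically by
+	// encoding/json, so entropy can be supplied as ordinary bytes.
+	entropy := []byte("my-app-v1") // hypothetical domain-separation entropy
+	params, _ := json.Marshal([]interface{}{
+		[]interface{}{}, // tipset key, left empty purely for illustration
+		2,               // DomainSeparationTag, matching the numeric value in the Inputs below
+		10101,           // epoch to sample at
+		entropy,         // marshals to "bXktYXBwLXYx"
+	})
+	fmt.Println(string(params))
+}
+```
+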
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `null` + +### ChainGetRandomnessFromTickets +ChainGetRandomnessFromTickets is used to sample the chain for randomness. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 2, + 10101, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `null` + +### ChainGetTipSet +ChainGetTipSet returns the tipset specified by the given TipSetKey. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainGetTipSetByHeight +ChainGetTipSetByHeight looks back for a tipset at the specified epoch. +If there are no blocks at the specified epoch, a tipset at an earlier epoch +will be returned. + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainHasObj +ChainHasObj checks if a given CID exists in the chain blockstore. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ChainHead +ChainHead returns the current head of the chain. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Cids": null, + "Blocks": null, + "Height": 0 +} +``` + +### ChainNotify +ChainNotify returns channel with chain head updates. +First message is guaranteed to be of len == 1, and type == 'current'. + + +Perms: read + +Inputs: `null` + +Response: `null` + +### ChainReadObj +ChainReadObj reads ipld nodes referenced by the specified CID from chain +blockstore and returns raw bytes. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"Ynl0ZSBhcnJheQ=="` + +### ChainSetHead +ChainSetHead forcefully sets current chain head. Use with caution. + + +Perms: admin + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### ChainStatObj +ChainStatObj returns statistics about the graph referenced by 'obj'. +If 'base' is also specified, then the returned stat will be a diff +between the two objects. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Size": 42, + "Links": 42 +} +``` + +### ChainTipSetWeight +ChainTipSetWeight computes weight for the specified tipset. 
+ + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +## Client +The Client methods all have to do with interacting with the storage and +retrieval markets as a client + + +### ClientCalcCommP +ClientCalcCommP calculates the CommP for a specified file + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 1024 +} +``` + +### ClientDataTransferUpdates +There are not yet any comments for this method. + +Perms: write + +Inputs: `null` + +Response: +```json +{ + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42 +} +``` + +### ClientDealSize +ClientDealSize calculates real deal data size + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "PayloadSize": 9, + "PieceSize": 1032 +} +``` + +### ClientFindData +ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: `null` + +### ClientGenCar +ClientGenCar generates a CAR file for the specified file. + + +Perms: write + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + }, + "string value" +] +``` + +Response: `{}` + +### ClientGetDealInfo +ClientGetDealInfo returns the latest information about a given deal. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "Provider": "t01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z" +} +``` + +### ClientGetDealUpdates +ClientGetDealUpdates returns the status of updated deals + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "Provider": "t01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z" +} +``` + +### ClientHasLocal +ClientHasLocal indicates whether a certain CID is locally stored. 
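+
+Tying a few of the Client methods together, here is a hedged sketch of what a storage-deal proposal looks like on the wire: a `ClientImport` call to obtain the data's root CID, followed by a `ClientStartDeal` call (both entries appear below, and both require an admin token). The file path, miner, price, duration and the `graphsync` transfer type are illustrative assumptions; the printed bodies would be POSTed exactly like the ChainGetBlock sketch earlier.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// envelope wraps a parameter list in a JSON-RPC 2.0 request body.
+func envelope(method string, params interface{}) string {
+	b, _ := json.MarshalIndent(map[string]interface{}{
+		"jsonrpc": "2.0", "id": 1, "method": method, "params": params,
+	}, "", "  ")
+	return string(b)
+}
+
+func main() {
+	// Step 1: import the file to obtain its root CID (see ClientImport below).
+	fmt.Println(envelope("Filecoin.ClientImport", []interface{}{
+		map[string]interface{}{"Path": "/tmp/my-data.bin", "IsCAR": false}, // hypothetical local file
+	}))
+
+	// Step 2: propose the deal, reusing the Root returned by ClientImport
+	// (see the ClientStartDeal entry below for the full parameter struct).
+	root := map[string]string{"/": "bafy..."} // placeholder: use the Root from step 1
+	fmt.Println(envelope("Filecoin.ClientStartDeal", []interface{}{
+		map[string]interface{}{
+			"Data": map[string]interface{}{
+				"TransferType": "graphsync", // assumed default transfer type
+				"Root":         root,
+			},
+			"Wallet":            "t3...",      // placeholder client address
+			"Miner":             "t01234",
+			"EpochPrice":        "2500000000", // attoFIL per epoch, illustrative
+			"MinBlocksDuration": 518400,       // duration in epochs, illustrative
+			"FastRetrieval":     true,
+			"VerifiedDeal":      false,
+		},
+	}))
+}
+```
+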
+ + +Perms: write + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `true` + +### ClientImport +ClientImport imports file under the specified path into filestore. + + +Perms: admin + +Inputs: +```json +[ + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ImportID": 50 +} +``` + +### ClientListDataTransfers +ClientListTransfers returns the status of all ongoing transfers of data + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListDeals +ClientListDeals returns information about the deals made by the local client. + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientListImports +ClientListImports lists imported files and their root CIDs + + +Perms: write + +Inputs: `null` + +Response: `null` + +### ClientMinerQueryOffer +ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. + + +Perms: read + +Inputs: +```json +[ + "t01234", + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: +```json +{ + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "t01234", + "MinerPeer": { + "Address": "t01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } +} +``` + +### ClientQueryAsk +ClientQueryAsk returns a signed StorageAsk from the specified miner. + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "t01234" +] +``` + +Response: +```json +{ + "Ask": { + "Price": "0", + "VerifiedPrice": "0", + "MinPieceSize": 1032, + "MaxPieceSize": 1032, + "Miner": "t01234", + "Timestamp": 10101, + "Expiry": 10101, + "SeqNo": 42 + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +} +``` + +### ClientRemoveImport +ClientRemoveImport removes file import + + +Perms: admin + +Inputs: +```json +[ + 50 +] +``` + +Response: `{}` + +### ClientRetrieve +ClientRetrieve initiates the retrieval of a file, as specified in the order. + + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "Total": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Client": "t01234", + "Miner": "t01234", + "MinerPeer": { + "Address": "t01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + +### ClientRetrieveTryRestartInsufficientFunds +ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel +which are stuck due to insufficient funds + + +Perms: write + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `{}` + +### ClientRetrieveWithEvents +ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel +of status updates. 
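+
+The retrieval methods pass around two closely related shapes: the `QueryOffer` returned by `ClientFindData`/`ClientMinerQueryOffer` and the `RetrievalOrder` consumed by `ClientRetrieve`, both shown above. A sketch of mapping one into the other, with the struct definitions trimmed to the fields used here and placeholder addresses and paths:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Trimmed-down versions of the QueryOffer and RetrievalOrder shapes from this section.
+type QueryOffer struct {
+	Err                     string
+	Root                    map[string]string
+	Size                    uint64
+	MinPrice                string
+	UnsealPrice             string
+	PaymentInterval         uint64
+	PaymentIntervalIncrease uint64
+	Miner                   string
+	MinerPeer               json.RawMessage
+}
+
+type RetrievalOrder struct {
+	Root                    map[string]string
+	Size                    uint64
+	Total                   string
+	UnsealPrice             string
+	PaymentInterval         uint64
+	PaymentIntervalIncrease uint64
+	Client                  string
+	Miner                   string
+	MinerPeer               json.RawMessage
+}
+
+// order turns an offer into the order ClientRetrieve expects, paying at most the quoted MinPrice.
+func order(o QueryOffer, client string) RetrievalOrder {
+	return RetrievalOrder{
+		Root:                    o.Root,
+		Size:                    o.Size,
+		Total:                   o.MinPrice,
+		UnsealPrice:             o.UnsealPrice,
+		PaymentInterval:         o.PaymentInterval,
+		PaymentIntervalIncrease: o.PaymentIntervalIncrease,
+		Client:                  client,
+		Miner:                   o.Miner,
+		MinerPeer:               o.MinerPeer,
+	}
+}
+
+func main() {
+	// A hypothetical offer, as it would come back from ClientFindData.
+	offer := QueryOffer{
+		Root:     map[string]string{"/": "bafy..."},
+		Size:     42,
+		MinPrice: "0",
+		Miner:    "t01234",
+	}
+	params, _ := json.MarshalIndent([]interface{}{
+		order(offer, "t3..."), // placeholder client wallet address
+		map[string]interface{}{"Path": "/tmp/retrieved.bin", "IsCAR": false}, // output FileRef
+	}, "", "  ")
+	fmt.Println(string(params)) // parameter list for a Filecoin.ClientRetrieve call
+}
+```
+
+`ClientRetrieveWithEvents` takes the same order/FileRef pair and additionally streams status updates.
+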
+ + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "Total": "0", + "UnsealPrice": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Client": "t01234", + "Miner": "t01234", + "MinerPeer": { + "Address": "t01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: +```json +{ + "Event": 5, + "Status": 0, + "BytesReceived": 42, + "FundsSpent": "0", + "Err": "string value" +} +``` + +### ClientStartDeal +ClientStartDeal proposes a deal with a miner. + + +Perms: admin + +Inputs: +```json +[ + { + "Data": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024 + }, + "Wallet": "t01234", + "Miner": "t01234", + "EpochPrice": "0", + "MinBlocksDuration": 42, + "ProviderCollateral": "0", + "DealStartEpoch": 10101, + "FastRetrieval": true, + "VerifiedDeal": true + } +] +``` + +Response: `null` + +## Gas + + +### GasEstimateFeeCap +GasEstimateFeeCap estimates gas fee cap + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateGasLimit +GasEstimateGasLimit estimates gas used by the message and returns it. +It fails if message fails to execute. + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `9` + +### GasEstimateGasPremium +GasEstimateGasPremium estimates what gas price should be used for a +message to have high likelihood of inclusion in `nblocksincl` epochs. 
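+
+For the common case of filling in a message's gas fields before sending it, `GasEstimateMessageGas` (documented just below) takes the message, a send spec with a `MaxFee` ceiling, and a tipset key, and returns the message with `GasLimit`, `GasFeeCap` and `GasPremium` populated. A sketch of those parameters, assuming a plain send is method `0` and amounts are attoFIL strings; every concrete value is a placeholder.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// An unsigned send with the gas fields left at their zero values; the
+	// estimator fills in GasLimit, GasFeeCap and GasPremium.
+	msg := map[string]interface{}{
+		"Version":    0,
+		"To":         "t01234",              // placeholder recipient
+		"From":       "t3...",               // placeholder sender, must be in the local wallet
+		"Value":      "1000000000000000000", // 1 FIL in attoFIL, illustrative
+		"GasLimit":   0,
+		"GasFeeCap":  "0",
+		"GasPremium": "0",
+		"Method":     0,
+		"Params":     nil,
+	}
+	spec := map[string]string{"MaxFee": "1000000000000000"} // fee ceiling in attoFIL, illustrative
+
+	// Parameter list for Filecoin.GasEstimateMessageGas: message, send spec,
+	// tipset key (left empty here for the current head).
+	params, _ := json.MarshalIndent([]interface{}{msg, spec, []interface{}{}}, "", "  ")
+	fmt.Println(string(params))
+}
+```
+
+`MpoolPushMessage`, in the Mpool section further down, accepts the same message/spec pair, assigns the nonce and signs with the `From` address before pushing.
+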
+ + +Perms: read + +Inputs: +```json +[ + 42, + "t01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### GasEstimateMessageGas +GasEstimateMessageGas estimates gas values for unset message gas fields + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" +} +``` + +## I + + +### ID + + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +## Log + + +### LogList + + +Perms: write + +Inputs: `null` + +Response: `null` + +### LogSetLevel + + +Perms: write + +Inputs: +```json +[ + "string value", + "string value" +] +``` + +Response: `{}` + +## Market + + +### MarketEnsureAvailable +MarketFreeBalance + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Miner + + +### MinerCreateBlock +There are not yet any comments for this method. + +Perms: write + +Inputs: +```json +[ + { + "Miner": "t01234", + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "Eproof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconValues": null, + "Messages": null, + "Epoch": 10101, + "Timestamp": 42, + "WinningPoStProof": null + } +] +``` + +Response: +```json +{ + "Header": { + "Miner": "t01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": null, + "SecpkMessages": null +} +``` + +### MinerGetBaseInfo +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: +```json +[ + "t01234", + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": "0", + "NetworkPower": "0", + "Sectors": null, + "WorkerKey": "t01234", + "SectorSize": 34359738368, + "PrevBeaconEntry": { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "HasMinPower": true +} +``` + +## Mpool +The Mpool methods are for interacting with the message pool. The message pool +manages all incoming and outgoing 'messages' going over the network. + + +### MpoolClear +MpoolClear clears pending messages from the mpool + + +Perms: write + +Inputs: +```json +[ + true +] +``` + +Response: `{}` + +### MpoolGetConfig +MpoolGetConfig returns (a copy of) the current mpool config + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "PriorityAddrs": null, + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 +} +``` + +### MpoolGetNonce +MpoolGetNonce gets next nonce for the specified sender. +Note that this method may not be atomic. Use MpoolPushMessage instead. + + +Perms: read + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `42` + +### MpoolPending +MpoolPending returns pending mempool messages. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### MpoolPush +MpoolPush pushes a signed message to mempool. + + +Perms: write + +Inputs: +```json +[ + { + "Message": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MpoolPushMessage +MpoolPushMessage atomically assigns a nonce, signs, and pushes a message +to mempool. 
+maxFee is only used when GasFeeCap/GasPremium fields aren't specified + +When maxFee is set to 0, MpoolPushMessage will guess appropriate fee +based on current chain conditions + + +Perms: sign + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + { + "MaxFee": "0" + } +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +} +``` + +### MpoolSelect +MpoolSelect returns a list of pending messages for inclusion in the next block + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 12.3 +] +``` + +Response: `null` + +### MpoolSetConfig +MpoolSetConfig sets the mpool config to (a copy of) the supplied config + + +Perms: write + +Inputs: +```json +[ + { + "PriorityAddrs": null, + "SizeLimitHigh": 123, + "SizeLimitLow": 123, + "ReplaceByFeeRatio": 12.3, + "PruneCooldown": 60000000000, + "GasLimitOverestimation": 12.3 + } +] +``` + +Response: `{}` + +### MpoolSub +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Type": 0, + "Message": { + "Message": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +} +``` + +## Msig +The Msig methods are used to interact with multisig wallets on the +filecoin network + + +### MsigAddApprove +MsigAddApprove approves a previously proposed AddSigner message +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + 42, + "t01234", + "t01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigAddCancel +MsigAddCancel cancels a previously proposed AddSigner message +It takes the following params: , , , +, + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + 42, + "t01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigAddPropose +MsigAddPropose proposes adding a signer in the multisig +It takes the following params: , , +, + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + "t01234", + true +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigApprove +MsigApprove approves a previously-proposed multisig message +It takes the following params: , , , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "t01234", + 42, + "t01234", + "t01234", + "0", + "t01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigCancel +MsigCancel cancels a previously-proposed multisig message +It takes the following params: , , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "t01234", + 42, + "t01234", + "0", + "t01234", + 42, + 
"Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigCreate +MsigCreate creates a multisig wallet +It takes the following params: , , +, , + + +Perms: sign + +Inputs: +```json +[ + 42, + null, + 10101, + "0", + "t01234", + "0" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigGetAvailableBalance +MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigGetVested +MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. +It takes the following params: , , + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### MsigPropose +MsigPropose proposes a multisig message +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + "0", + "t01234", + 42, + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapApprove +MsigSwapApprove approves a previously proposed SwapSigner +It takes the following params: , , , +, , + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + 42, + "t01234", + "t01234", + "t01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapCancel +MsigSwapCancel cancels a previously proposed SwapSigner message +It takes the following params: , , , +, + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + 42, + "t01234", + "t01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### MsigSwapPropose +MsigSwapPropose proposes swapping 2 signers in the multisig +It takes the following params: , , +, + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + "t01234", + "t01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## Net + + +### NetAddrsListen + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +} +``` + +### NetAgentVersion + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `"string value"` + +### NetAutoNatStatus + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Reachability": 1, + "PublicAddr": "string value" +} +``` + +### NetBandwidthStats + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "TotalIn": 9, + "TotalOut": 9, + "RateIn": 12.3, + "RateOut": 12.3 +} +``` + +### NetBandwidthStatsByPeer + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, 
+ "RateOut": 50 + } +} +``` + +### NetBandwidthStatsByProtocol + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "/fil/hello/1.0.0": { + "TotalIn": 174000, + "TotalOut": 12500, + "RateIn": 100, + "RateOut": 50 + } +} +``` + +### NetConnect + + +Perms: write + +Inputs: +```json +[ + { + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + } +] +``` + +Response: `{}` + +### NetConnectedness + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `1` + +### NetDisconnect + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: `{}` + +### NetFindPeer + + +Perms: read + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "Addrs": null, + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +} +``` + +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: `null` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: `null` + +## Paych +The Paych methods are for interacting with and managing payment channels + + +### PaychAllocateLane +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `42` + +### PaychAvailableFunds +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "t01234", + "To": "t01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychAvailableFundsByFromTo +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234" +] +``` + +Response: +```json +{ + "Channel": "\u003cempty\u003e", + "From": "t01234", + "To": "t01234", + "ConfirmedAmt": "0", + "PendingAmt": "0", + "PendingWaitSentinel": null, + "QueuedAmt": "0", + "VoucherReedeemedAmt": "0" +} +``` + +### PaychCollect +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychGet +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + "0" +] +``` + +Response: +```json +{ + "Channel": "t01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + +### PaychGetWaitReady +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"t01234"` + +### PaychList +There are not yet any comments for this method. + +Perms: read + +Inputs: `null` + +Response: `null` + +### PaychNewPayment +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234", + "t01234", + null +] +``` + +Response: +```json +{ + "Channel": "t01234", + "WaitSentinel": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Vouchers": null +} +``` + +### PaychSettle +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +### PaychStatus +There are not yet any comments for this method. 
+ +Perms: read + +Inputs: +```json +[ + "t01234" +] +``` + +Response: +```json +{ + "ControlAddr": "t01234", + "Direction": 1 +} +``` + +### PaychVoucherAdd +There are not yet any comments for this method. + +Perms: write + +Inputs: +```json +[ + "t01234", + { + "ChannelAddr": "t01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "t01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "0" +] +``` + +Response: `"0"` + +### PaychVoucherCheckSpendable +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + "t01234", + { + "ChannelAddr": "t01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "t01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: `true` + +### PaychVoucherCheckValid +There are not yet any comments for this method. + +Perms: read + +Inputs: +```json +[ + "t01234", + { + "ChannelAddr": "t01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "t01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` + +Response: `{}` + +### PaychVoucherCreate +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234", + "0", + 42 +] +``` + +Response: +```json +{ + "Voucher": { + "ChannelAddr": "t01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "t01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Shortfall": "0" +} +``` + +### PaychVoucherList +There are not yet any comments for this method. + +Perms: write + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `null` + +### PaychVoucherSubmit +There are not yet any comments for this method. + +Perms: sign + +Inputs: +```json +[ + "t01234", + { + "ChannelAddr": "t01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "t01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": null, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + }, + "Ynl0ZSBhcnJheQ==", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" +} +``` + +## State +The State methods are used to query, inspect, and interact with chain state. +All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. +A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. 
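+
+Concretely, the TipSetKey parameter is the array-of-CIDs value that closes most of the Inputs lists below, normally copied from the `Cids` field of a `ChainHead` or `ChainGetTipSet` result. The sketch prints both an explicit key and the empty form which, per the note above, resolves to the heaviest tipset; that the nil key serializes as an empty array is an assumption here (`null` is the other common spelling).
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	addr := "t01234"
+
+	// Explicit tipset key: an array of block CIDs in the {"/": "..."} form.
+	explicit := []interface{}{addr, []map[string]string{
+		{"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"},
+	}}
+
+	// Nil/empty tipset key: resolves to the current heaviest tipset.
+	head := []interface{}{addr, []interface{}{}}
+
+	for _, p := range [][]interface{}{explicit, head} {
+		b, _ := json.Marshal(p)
+		fmt.Println(string(b)) // parameter list for e.g. Filecoin.StateGetActor
+	}
+}
+```
+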
+ + +### StateAccountKey +StateAccountKey returns the public key address of the given ID address + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"t01234"` + +### StateAllMinerFaults +StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset + + +Perms: read + +Inputs: +```json +[ + 10101, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateCall +StateCall runs the given message and returns its result without any persisted changes. + + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Msg": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": null, + "Subcalls": null + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateChangedActors +StateChangedActors returns all the actors whose states change between the two given state CIDs +TODO: Should this take tipset keys instead? + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "t01236": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" + } +} +``` + +### StateCirculatingSupply +StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "FilVested": "0", + "FilMined": "0", + "FilBurnt": "0", + "FilLocked": "0", + "FilCirculating": "0" +} +``` + +### StateCompute +StateCompute is a flexible command that applies the given messages on the given tipset. +The messages are run as though the VM were at the provided height. 
+ + +Perms: read + +Inputs: +```json +[ + 10101, + null, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Trace": null +} +``` + +### StateDealProviderCollateralBounds +StateDealProviderCollateralBounds returns the min and max collateral a storage provider +can issue. It takes the deal size and verified status as parameters. + + +Perms: read + +Inputs: +```json +[ + 1032, + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Min": "0", + "Max": "0" +} +``` + +### StateGetActor +StateGetActor returns the indicated actor's nonce and balance. + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0" +} +``` + +### StateGetReceipt +StateGetReceipt returns the message receipt for the given message + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 +} +``` + +### StateListActors +StateListActors returns the addresses of every actor in the state + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateListMessages +StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. 
+ + +Perms: read + +Inputs: +```json +[ + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + 10101 +] +``` + +Response: `null` + +### StateListMiners +StateListMiners returns the addresses of every miner that has claimed power in the Power Actor + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateLookupID +StateLookupID retrieves the ID address of the given address + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"t01234"` + +### StateMarketBalance +StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Escrow": "0", + "Locked": "0" +} +``` + +### StateMarketDeals +StateMarketDeals returns information about every deal in the Storage Market + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "t01234", + "Provider": "t01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +} +``` + +### StateMarketParticipants +StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "t026363": { + "Escrow": "0", + "Locked": "0" + } +} +``` + +### StateMarketStorageDeal +StateMarketStorageDeal returns information about the indicated deal + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "t01234", + "Provider": "t01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + 
"SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } +} +``` + +### StateMinerActiveSectors +StateMinerActiveSectors returns info about sectors that a given miner is actively proving. + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerAvailableBalance +StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerDeadlines +StateMinerDeadlines returns all the proving deadlines for the given miner + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerFaults +StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerInfo +StateMinerInfo returns info about the indicated miner + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Owner": "t01234", + "Worker": "t01234", + "NewWorker": "t01234", + "ControlAddresses": null, + "WorkerChangeEpoch": 10101, + "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Multiaddrs": null, + "SealProofType": 3, + "SectorSize": 34359738368, + "WindowPoStPartitionSectors": 42 +} +``` + +### StateMinerInitialPledgeCollateral +StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "t01234", + { + "SealProof": 3, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerPartitions +StateMinerPartitions loads miner partitions for the specified miner/deadline + + +Perms: read + +Inputs: +```json +[ + "t01234", + 42, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMinerPower +StateMinerPower returns the power of the indicated miner + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": 
"bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "MinerPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + }, + "TotalPower": { + "RawBytePower": "0", + "QualityAdjPower": "0" + } +} +``` + +### StateMinerPreCommitDepositForPower +StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "t01234", + { + "SealProof": 3, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateMinerProvingDeadline +StateMinerProvingDeadline calculates the deadline at some epoch for a proving period +and returns the deadline-related calculations. + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "CurrentEpoch": 10101, + "PeriodStart": 10101, + "Index": 42, + "Open": 10101, + "Close": 10101, + "Challenge": 10101, + "FaultCutoff": 10101, + "WPoStPeriodDeadlines": 42, + "WPoStProvingPeriod": 10101, + "WPoStChallengeWindow": 10101, + "WPoStChallengeLookback": 10101, + "FaultDeclarationCutoff": 10101 +} +``` + +### StateMinerRecoveries +StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +[ + 5, + 1 +] +``` + +### StateMinerSectorCount +StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Sectors": 42, + "Active": 42 +} +``` + +### StateMinerSectors +StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. +If the filterOut boolean is set to true, any sectors in the filter are excluded. +If false, only those sectors in the filter are included. 
+ + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + 0 + ], + true, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `null` + +### StateMsgGasCost +StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" +} +``` + +### StateNetworkName +StateNetworkName returns the name of the network the node is synced to + + +Perms: read + +Inputs: `null` + +Response: `"lotus"` + +### StateReadState +StateReadState returns the indicated actor's state. + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Balance": "0", + "State": {} +} +``` + +### StateReplay +StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Msg": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": null, + "Subcalls": null + }, + "Error": "string value", + "Duration": 60000000000 +} +``` + +### StateSearchMsg +StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +### StateSectorExpiration +StateSectorExpiration returns epoch at which given sector will expire + + +Perms: read + 
+Inputs: +```json +[ + "t01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "OnTime": 10101, + "Early": 10101 +} +``` + +### StateSectorGetInfo +StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found +NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate +expiration epoch + + +Perms: read + +Inputs: +```json +[ + "t01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "SectorNumber": 9, + "SealProof": 3, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": null, + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0" +} +``` + +### StateSectorPartition +StateSectorPartition finds deadline/partition with the specified sector + + +Perms: read + +Inputs: +```json +[ + "t01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Deadline": 42, + "Partition": 42 +} +``` + +### StateSectorPreCommitInfo +StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector + + +Perms: read + +Inputs: +```json +[ + "t01234", + 9, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: +```json +{ + "Info": { + "SealProof": 3, + "SectorNumber": 9, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "SealRandEpoch": 10101, + "DealIDs": null, + "Expiration": 10101, + "ReplaceCapacity": true, + "ReplaceSectorDeadline": 42, + "ReplaceSectorPartition": 42, + "ReplaceSectorNumber": 9 + }, + "PreCommitDeposit": "0", + "PreCommitEpoch": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0" +} +``` + +### StateVerifiedClientStatus +StateVerifiedClientStatus returns the data cap for the given address. +Returns nil if there is no entry in the data cap table for the +address. + + +Perms: read + +Inputs: +```json +[ + "t01234", + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `"0"` + +### StateWaitMsg +StateWaitMsg looks back in the chain for a message. If not found, it blocks until the +message arrives on chain, and gets to the indicated confidence depth. 
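+
+The Response shape shown below (shared with `StateSearchMsg` above) decodes cleanly into a small Go struct, with `[]byte` fields accepting the base64 strings directly. A sketch using the document's own example values:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// MsgLookup mirrors the parts of the StateWaitMsg / StateSearchMsg response used here.
+type MsgLookup struct {
+	Receipt struct {
+		ExitCode int64
+		Return   []byte
+		GasUsed  int64
+	}
+	Height int64
+}
+
+func main() {
+	// In practice this would be the "result" field of the JSON-RPC reply.
+	raw := `{"Receipt":{"ExitCode":0,"Return":"Ynl0ZSBhcnJheQ==","GasUsed":9},"Height":10101}`
+
+	var lookup MsgLookup
+	if err := json.Unmarshal([]byte(raw), &lookup); err != nil {
+		panic(err)
+	}
+	if lookup.Receipt.ExitCode != 0 {
+		fmt.Println("message failed with exit code", lookup.Receipt.ExitCode)
+		return
+	}
+	fmt.Printf("executed at height %d, used %d gas, returned %d bytes\n",
+		lookup.Height, lookup.Receipt.GasUsed, len(lookup.Receipt.Return))
+}
+```
+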
+ + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 42 +] +``` + +Response: +```json +{ + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Receipt": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "ReturnDec": {}, + "TipSet": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101 +} +``` + +## Sync +The Sync method group contains methods for interacting with and +observing the lotus sync service. + + +### SyncCheckBad +SyncCheckBad checks if a block was marked as bad, and if it was, returns +the reason. + + +Perms: read + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `"string value"` + +### SyncCheckpoint +SyncCheckpoint marks a blocks as checkpointed, meaning that it won't ever fork away from it. + + +Perms: admin + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### SyncIncomingBlocks +SyncIncomingBlocks returns a channel streaming incoming, potentially not +yet synced block headers. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "Miner": "t01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" +} +``` + +### SyncMarkBad +SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced. +Use with extreme caution. + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +### SyncState +SyncState returns the current status of the lotus sync system. + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "ActiveSyncs": null +} +``` + +### SyncSubmitBlock +SyncSubmitBlock can be used to submit a newly created block to the. 
+network through this node + + +Perms: write + +Inputs: +```json +[ + { + "Header": { + "Miner": "t01234", + "Ticket": { + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "ElectionProof": { + "WinCount": 9, + "VRFProof": "Ynl0ZSBhcnJheQ==" + }, + "BeaconEntries": null, + "WinPoStProof": null, + "Parents": null, + "ParentWeight": "0", + "Height": 10101, + "ParentStateRoot": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ParentMessageReceipts": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Messages": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "BLSAggregate": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Timestamp": 42, + "BlockSig": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ForkSignaling": 42, + "ParentBaseFee": "0" + }, + "BlsMessages": null, + "SecpkMessages": null + } +] +``` + +Response: `{}` + +### SyncUnmarkBad +SyncUnmarkBad unmarks a blocks as bad, making it possible to be validated and synced again. + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + +## Wallet + + +### WalletBalance +WalletBalance returns the balance of the given address at the current head of the chain. + + +Perms: read + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `"0"` + +### WalletDefaultAddress +WalletDefaultAddress returns the address marked as default in the wallet. + + +Perms: write + +Inputs: `null` + +Response: `"t01234"` + +### WalletDelete +WalletDelete deletes an address from the wallet. + + +Perms: write + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `{}` + +### WalletExport +WalletExport returns the private key of an address in the wallet. + + +Perms: admin + +Inputs: +```json +[ + "t01234" +] +``` + +Response: +```json +{ + "Type": "string value", + "PrivateKey": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletHas +WalletHas indicates whether the given address is in the wallet. + + +Perms: write + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `true` + +### WalletImport +WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. + + +Perms: admin + +Inputs: +```json +[ + { + "Type": "string value", + "PrivateKey": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `"t01234"` + +### WalletList +WalletList lists all the addresses in the wallet. + + +Perms: write + +Inputs: `null` + +Response: `null` + +### WalletNew +WalletNew creates a new address in the wallet with the given sigType. + + +Perms: write + +Inputs: +```json +[ + 2 +] +``` + +Response: `"t01234"` + +### WalletSetDefault +WalletSetDefault marks the given address as as the default one. + + +Perms: admin + +Inputs: +```json +[ + "t01234" +] +``` + +Response: `{}` + +### WalletSign +WalletSign signs the given bytes using the given address. + + +Perms: sign + +Inputs: +```json +[ + "t01234", + "Ynl0ZSBhcnJheQ==" +] +``` + +Response: +```json +{ + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" +} +``` + +### WalletSignMessage +WalletSignMessage signs the given message using the given address. 
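Purely for orientation (again, not part of the generated reference), the sketch below shows how the Inputs listed underneath embed into a full JSON-RPC 2.0 envelope for this method: the first element is the signing address, the second is the message to sign, and all values are the usual placeholders.

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "Filecoin.WalletSignMessage",
  "params": [
    "t01234",
    {
      "Version": 42,
      "To": "t01234",
      "From": "t01234",
      "Nonce": 42,
      "Value": "0",
      "GasLimit": 9,
      "GasFeeCap": "0",
      "GasPremium": "0",
      "Method": 1,
      "Params": "Ynl0ZSBhcnJheQ=="
    }
  ]
}
```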
+ + +Perms: sign + +Inputs: +```json +[ + "t01234", + { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "t01234", + "From": "t01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==" + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +} +``` + +### WalletVerify +WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. +The address does not have to be in the wallet. + + +Perms: read + +Inputs: +```json +[ + "t01234", + "Ynl0ZSBhcnJheQ==", + { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } +] +``` + +Response: `true` + From dfd28ab0c44d7f032d6dff4e994b38491268ff0e Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 17 Sep 2020 20:56:43 +0200 Subject: [PATCH 12/88] Fix links in READMEs. --- README.md | 4 ++-- tools/dockers/docker-examples/README.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6c1e23efa..fa432bf7d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

- + Project Lotus Logo

@@ -18,7 +18,7 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more ## Building & Documentation -For instructions on how to build lotus from source, please visit [https://lotu.sh](https://lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation). +For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). ## Reporting a Vulnerability diff --git a/tools/dockers/docker-examples/README.md b/tools/dockers/docker-examples/README.md index 28553653c..3b8c34480 100644 --- a/tools/dockers/docker-examples/README.md +++ b/tools/dockers/docker-examples/README.md @@ -11,7 +11,7 @@ In this `docker-examples/` directory are community-contributed Docker and Docker - local node for a developer (`api-local-`) - hosted endpoint for apps / multiple developers (`api-hosted-`) - **For a local devnet or shared devnet** - - basic local devnet (also see [lotus docs on setting up a local devnet](https://lotu.sh/en+setup-local-dev-net)) + - basic local devnet (also see [lotus docs on setting up a local devnet](https://docs.filecoin.io/build/local-devnet/)) - shared devnet From a5e7fd8f5c0e8ed2c900bd23860e16d2336411f1 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 21 Sep 2020 18:43:57 +0200 Subject: [PATCH 13/88] Re-add docs-check step to circle --- .circleci/config.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index d8f149889..acd447f69 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -346,6 +346,15 @@ jobs: - run: git --no-pager diff - run: git --no-pager diff --quiet + docs-check: + executor: golang + steps: + - install-deps + - prepare + - run: make docsgen + - run: git --no-pager diff + - run: git --no-pager diff --quiet + lint: &lint description: | Run golangci-lint. 
@@ -415,6 +424,7 @@ workflows: - mod-tidy-check - gofmt - cbor-gen-check + - docs-check - test: codecov-upload: true test-suite-name: full From ad3db0e83c5f6723573eec1464bc3bc48d29c001 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Mon, 21 Sep 2020 19:05:39 +0200 Subject: [PATCH 14/88] Move unclassified docs back to the docs root --- .../en/{unclassified => }/WIP-arch-complementary-notes.md | 0 documentation/en/{unclassified => }/block-validation.md | 0 documentation/en/{unclassified => }/create-miner.md | 0 documentation/en/{unclassified => }/dev-tools-pond-ui.md | 0 documentation/en/{unclassified => }/sealing-procs.md | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename documentation/en/{unclassified => }/WIP-arch-complementary-notes.md (100%) rename documentation/en/{unclassified => }/block-validation.md (100%) rename documentation/en/{unclassified => }/create-miner.md (100%) rename documentation/en/{unclassified => }/dev-tools-pond-ui.md (100%) rename documentation/en/{unclassified => }/sealing-procs.md (100%) diff --git a/documentation/en/unclassified/WIP-arch-complementary-notes.md b/documentation/en/WIP-arch-complementary-notes.md similarity index 100% rename from documentation/en/unclassified/WIP-arch-complementary-notes.md rename to documentation/en/WIP-arch-complementary-notes.md diff --git a/documentation/en/unclassified/block-validation.md b/documentation/en/block-validation.md similarity index 100% rename from documentation/en/unclassified/block-validation.md rename to documentation/en/block-validation.md diff --git a/documentation/en/unclassified/create-miner.md b/documentation/en/create-miner.md similarity index 100% rename from documentation/en/unclassified/create-miner.md rename to documentation/en/create-miner.md diff --git a/documentation/en/unclassified/dev-tools-pond-ui.md b/documentation/en/dev-tools-pond-ui.md similarity index 100% rename from documentation/en/unclassified/dev-tools-pond-ui.md rename to documentation/en/dev-tools-pond-ui.md diff --git a/documentation/en/unclassified/sealing-procs.md b/documentation/en/sealing-procs.md similarity index 100% rename from documentation/en/unclassified/sealing-procs.md rename to documentation/en/sealing-procs.md From 3c524ac0e0d806468e1e3a28437e94115089d477 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Wed, 23 Sep 2020 18:28:11 +0200 Subject: [PATCH 15/88] refactor: move nonce generation from mpool to wallet --- chain/messagepool/messagepool.go | 94 ------------- chain/messagesigner/messagesigner.go | 116 ++++++++++++++++ chain/messagesigner/messagesigner_test.go | 159 ++++++++++++++++++++++ node/builder.go | 3 + node/impl/full/mpool.go | 66 +++++---- 5 files changed, 310 insertions(+), 128 deletions(-) create mode 100644 chain/messagesigner/messagesigner.go create mode 100644 chain/messagesigner/messagesigner_test.go diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 96900925f..d54ea7164 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -75,8 +75,6 @@ var ( ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium") ErrTooManyPendingMessages = errors.New("too many pending messages for actor") ErrNonceGap = errors.New("unfulfilled nonce gap") - - ErrTryAgain = errors.New("state inconsistency while pushing message; please try again") ) const ( @@ -795,98 +793,6 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) ( return act.Balance, nil } -func (mp *MessagePool) PushWithNonce(ctx 
context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) { - // serialize push access to reduce lock contention - mp.addSema <- struct{}{} - defer func() { - <-mp.addSema - }() - - mp.curTsLk.Lock() - mp.lk.Lock() - - curTs := mp.curTs - - fromKey := addr - if fromKey.Protocol() == address.ID { - var err error - fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs) - if err != nil { - mp.lk.Unlock() - mp.curTsLk.Unlock() - return nil, xerrors.Errorf("resolving sender key: %w", err) - } - } - - nonce, err := mp.getNonceLocked(fromKey, mp.curTs) - if err != nil { - mp.lk.Unlock() - mp.curTsLk.Unlock() - return nil, xerrors.Errorf("get nonce locked failed: %w", err) - } - - // release the locks for signing - mp.lk.Unlock() - mp.curTsLk.Unlock() - - msg, err := cb(fromKey, nonce) - if err != nil { - return nil, err - } - - err = mp.checkMessage(msg) - if err != nil { - return nil, err - } - - msgb, err := msg.Serialize() - if err != nil { - return nil, err - } - - // reacquire the locks and check state for consistency - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - - if mp.curTs != curTs { - return nil, ErrTryAgain - } - - mp.lk.Lock() - defer mp.lk.Unlock() - - nonce2, err := mp.getNonceLocked(fromKey, mp.curTs) - if err != nil { - return nil, xerrors.Errorf("get nonce locked failed: %w", err) - } - - if nonce2 != nonce { - return nil, ErrTryAgain - } - - publish, err := mp.verifyMsgBeforeAdd(msg, curTs, true) - if err != nil { - return nil, err - } - - if err := mp.checkBalance(msg, curTs); err != nil { - return nil, err - } - - if err := mp.addLocked(msg, false); err != nil { - return nil, xerrors.Errorf("add locked failed: %w", err) - } - if err := mp.addLocal(msg, msgb); err != nil { - log.Errorf("addLocal failed: %+v", err) - } - - if publish { - err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) - } - - return msg, err -} - func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { mp.lk.Lock() defer mp.lk.Unlock() diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go new file mode 100644 index 000000000..41b0edee9 --- /dev/null +++ b/chain/messagesigner/messagesigner.go @@ -0,0 +1,116 @@ +package messagesigner + +import ( + "bytes" + "context" + + "github.com/filecoin-project/lotus/chain/wallet" + + "github.com/filecoin-project/lotus/chain/messagepool" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +const dsKeyActorNonce = "ActorNonce" + +type mpoolAPI interface { + GetNonce(address.Address) (uint64, error) +} + +// MessageSigner keeps track of nonces per address, and increments the nonce +// when signing a message +type MessageSigner struct { + wallet *wallet.Wallet + mpool mpoolAPI + ds datastore.Batching +} + +func NewMessageSigner(wallet *wallet.Wallet, mpool *messagepool.MessagePool, ds dtypes.MetadataDS) *MessageSigner { + return newMessageSigner(wallet, mpool, ds) +} + +func newMessageSigner(wallet *wallet.Wallet, mpool mpoolAPI, ds dtypes.MetadataDS) *MessageSigner { + ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/")) + return &MessageSigner{ + wallet: wallet, + mpool: mpool, + ds: ds, + } +} + +// SignMessage increments the nonce for the message From address, 
and signs +// the message +func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) { + nonce, err := ms.nextNonce(msg.From) + if err != nil { + return nil, xerrors.Errorf("failed to create nonce: %w", err) + } + + msg.Nonce = nonce + sig, err := ms.wallet.Sign(ctx, msg.From, msg.Cid().Bytes()) + if err != nil { + return nil, xerrors.Errorf("failed to sign message: %w", err) + } + + return &types.SignedMessage{ + Message: *msg, + Signature: *sig, + }, nil +} + +// nextNonce increments the nonce. +// If there is no nonce in the datastore, gets the nonce from the message pool. +func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) { + addrNonceKey := datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()}) + + // Get the nonce for this address from the datastore + nonceBytes, err := ms.ds.Get(addrNonceKey) + + var nonce uint64 + switch { + case xerrors.Is(err, datastore.ErrNotFound): + // If a nonce for this address hasn't yet been created in the + // datastore, check the mempool - nonces used to be created by + // the mempool so we need to support nodes that still have mempool + // nonces. Note that the mempool returns the actor state's nonce by + // default. + nonce, err = ms.mpool.GetNonce(addr) + if err != nil { + return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err) + } + + case err != nil: + return 0, xerrors.Errorf("failed to get nonce from datastore: %w", err) + + default: + // There is a nonce in the mempool, so unmarshall and increment it + maj, val, err := cbg.CborReadHeader(bytes.NewReader(nonceBytes)) + if err != nil { + return 0, xerrors.Errorf("failed to parse nonce from datastore: %w", err) + } + if maj != cbg.MajUnsignedInt { + return 0, xerrors.Errorf("bad cbor type parsing nonce from datastore") + } + + nonce = val + 1 + } + + // Write the nonce for this address to the datastore + buf := bytes.Buffer{} + _, err = buf.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, nonce)) + if err != nil { + return 0, xerrors.Errorf("failed to marshall nonce: %w", err) + } + err = ms.ds.Put(addrNonceKey, buf.Bytes()) + if err != nil { + return 0, xerrors.Errorf("failed to write nonce to datastore: %w", err) + } + + return nonce, nil +} diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go new file mode 100644 index 000000000..e52137892 --- /dev/null +++ b/chain/messagesigner/messagesigner_test.go @@ -0,0 +1,159 @@ +package messagesigner + +import ( + "context" + "sync" + "testing" + + "github.com/filecoin-project/lotus/chain/wallet" + + "github.com/filecoin-project/go-state-types/crypto" + "github.com/stretchr/testify/require" + + ds_sync "github.com/ipfs/go-datastore/sync" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-datastore" +) + +type mockMpool struct { + lk sync.RWMutex + nonces map[address.Address]uint64 +} + +func newMockMpool() *mockMpool { + return &mockMpool{nonces: make(map[address.Address]uint64)} +} + +func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) { + mp.lk.Lock() + defer mp.lk.Unlock() + + mp.nonces[addr] = nonce +} + +func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) { + mp.lk.RLock() + defer mp.lk.RUnlock() + + return mp.nonces[addr], nil +} + +func TestMessageSignerSignMessage(t *testing.T) { + ctx := context.Background() + + w, _ := wallet.NewWallet(wallet.NewMemKeyStore()) + from1, err := 
w.GenerateKey(crypto.SigTypeSecp256k1) + require.NoError(t, err) + from2, err := w.GenerateKey(crypto.SigTypeSecp256k1) + require.NoError(t, err) + to1, err := w.GenerateKey(crypto.SigTypeSecp256k1) + require.NoError(t, err) + to2, err := w.GenerateKey(crypto.SigTypeSecp256k1) + require.NoError(t, err) + + type msgSpec struct { + msg *types.Message + mpoolNonce [1]uint64 + expNonce uint64 + } + tests := []struct { + name string + msgs []msgSpec + }{{ + // No nonce yet in datastore + name: "no nonce yet", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 0, + }}, + }, { + // Get nonce value of zero from mpool + name: "mpool nonce zero", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + mpoolNonce: [1]uint64{0}, + expNonce: 0, + }}, + }, { + // Get non-zero nonce value from mpool + name: "mpool nonce set", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + mpoolNonce: [1]uint64{5}, + expNonce: 5, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + // Should ignore mpool nonce because after the first message nonce + // will come from the datastore + mpoolNonce: [1]uint64{10}, + expNonce: 6, + }}, + }, { + // Nonce should increment independently for each address + name: "nonce increments per address", + msgs: []msgSpec{{ + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 0, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 1, + }, { + msg: &types.Message{ + To: to2, + From: from2, + }, + mpoolNonce: [1]uint64{5}, + expNonce: 5, + }, { + msg: &types.Message{ + To: to2, + From: from2, + }, + expNonce: 6, + }, { + msg: &types.Message{ + To: to1, + From: from1, + }, + expNonce: 2, + }}, + }} + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + mpool := newMockMpool() + ds := ds_sync.MutexWrap(datastore.NewMapDatastore()) + ms := newMessageSigner(w, mpool, ds) + + for _, m := range tt.msgs { + if len(m.mpoolNonce) == 1 { + mpool.setNonce(m.msg.From, m.mpoolNonce[0]) + } + smsg, err := ms.SignMessage(ctx, m.msg) + require.NoError(t, err) + require.Equal(t, m.expNonce, smsg.Message.Nonce) + } + }) + } +} diff --git a/node/builder.go b/node/builder.go index c37a5db58..c49789a6a 100644 --- a/node/builder.go +++ b/node/builder.go @@ -6,6 +6,8 @@ import ( "os" "time" + "github.com/filecoin-project/lotus/chain/messagesigner" + logging "github.com/ipfs/go-log" ci "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/host" @@ -259,6 +261,7 @@ func Online() Option { Override(new(*store.ChainStore), modules.ChainStore), Override(new(*stmgr.StateManager), stmgr.NewStateManager), Override(new(*wallet.Wallet), wallet.NewWallet), + Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner), Override(new(dtypes.ChainGCLocker), blockstore.NewGCLocker), Override(new(dtypes.ChainGCBlockstore), modules.ChainGCBlockstore), diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index 6acb17990..003260496 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -4,14 +4,14 @@ import ( "context" "encoding/json" + "github.com/filecoin-project/lotus/chain/messagesigner" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "go.uber.org/fx" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -22,9 +22,7 @@ type MpoolAPI struct { WalletAPI GasAPI - Chain *store.ChainStore - - Mpool *messagepool.MessagePool + MessageSigner *messagesigner.MessageSigner PushLocks *dtypes.MpoolLocker } @@ -114,12 +112,14 @@ func (a *MpoolAPI) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (ci } func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) { + cp := *msg + msg = &cp inMsg := *msg + fromA, err := a.Stmgr.ResolveToKeyAddress(ctx, msg.From, nil) + if err != nil { + return nil, xerrors.Errorf("getting key address: %w", err) + } { - fromA, err := a.Stmgr.ResolveToKeyAddress(ctx, msg.From, nil) - if err != nil { - return nil, xerrors.Errorf("getting key address: %w", err) - } done, err := a.PushLocks.TakeLock(ctx, fromA) if err != nil { return nil, xerrors.Errorf("taking lock: %w", err) @@ -131,7 +131,7 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe return nil, xerrors.Errorf("MpoolPushMessage expects message nonce to be 0, was %d", msg.Nonce) } - msg, err := a.GasAPI.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK) + msg, err = a.GasAPI.GasEstimateMessageGas(ctx, msg, spec, types.EmptyTSK) if err != nil { return nil, xerrors.Errorf("GasEstimateMessageGas error: %w", err) } @@ -143,33 +143,31 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe inJson, outJson) } - sign := func(from address.Address, nonce uint64) (*types.SignedMessage, error) { - msg.Nonce = nonce - if msg.From.Protocol() == address.ID { - log.Warnf("Push from ID address (%s), adjusting to %s", msg.From, from) - msg.From = from - } - - b, err := a.WalletBalance(ctx, msg.From) - if err != nil { - return nil, xerrors.Errorf("mpool push: getting origin balance: %w", err) - } - - if b.LessThan(msg.Value) { - return nil, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, msg.Value) - } - - return a.WalletSignMessage(ctx, from, msg) + if msg.From.Protocol() == address.ID { + log.Warnf("Push from ID address (%s), adjusting to %s", msg.From, fromA) + msg.From = fromA } - var m *types.SignedMessage -again: - m, err = a.Mpool.PushWithNonce(ctx, msg.From, sign) - if err == messagepool.ErrTryAgain { - log.Debugf("temporary failure while pushing message: %s; retrying", err) - goto again + b, err := a.WalletBalance(ctx, msg.From) + if err != nil { + return nil, xerrors.Errorf("mpool push: getting origin balance: %w", err) } - return m, err + + if b.LessThan(msg.Value) { + return nil, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, msg.Value) + } + + smsg, err := a.MessageSigner.SignMessage(ctx, msg) + if err != nil { + return nil, xerrors.Errorf("mpool push: failed to sign message: %w", err) + } + + _, err = a.Mpool.Push(smsg) + if err != nil { + return nil, xerrors.Errorf("mpool push: failed to push message: %w", err) + } + + return smsg, err } func (a *MpoolAPI) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { From 7eb9bec13f6d351e192b0e8319ccf1bf0c3c077e Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 18 Sep 2020 18:03:59 +0200 Subject: [PATCH 16/88] feat: dont recompute post on submit redux --- storage/wdpost_changehandler.go | 533 ++++++++++++ storage/wdpost_changehandler_test.go | 1173 ++++++++++++++++++++++++++ storage/wdpost_nextdl_test.go | 38 + storage/wdpost_run.go | 193 +++-- storage/wdpost_run_test.go | 6 +- storage/wdpost_sched.go | 142 +--- 6 files 
changed, 1913 insertions(+), 172 deletions(-) create mode 100644 storage/wdpost_changehandler.go create mode 100644 storage/wdpost_changehandler_test.go create mode 100644 storage/wdpost_nextdl_test.go diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go new file mode 100644 index 000000000..e65b7a7fc --- /dev/null +++ b/storage/wdpost_changehandler.go @@ -0,0 +1,533 @@ +package storage + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/chain/types" +) + +const SubmitConfidence = 4 + +type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) +type CompleteSubmitPoSTCb func(err error) + +type changeHandlerAPI interface { + StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) + startGeneratePoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, onComplete CompleteGeneratePoSTCb) context.CancelFunc + startSubmitPoST(ctx context.Context, ts *types.TipSet, deadline *dline.Info, posts []miner.SubmitWindowedPoStParams, onComplete CompleteSubmitPoSTCb) context.CancelFunc + onAbort(ts *types.TipSet, deadline *dline.Info) + failPost(err error, ts *types.TipSet, deadline *dline.Info) +} + +type changeHandler struct { + api changeHandlerAPI + actor address.Address + proveHdlr *proveHandler + submitHdlr *submitHandler +} + +func newChangeHandler(api changeHandlerAPI, actor address.Address) *changeHandler { + posts := newPostsCache() + p := newProver(api, posts) + s := newSubmitter(api, posts) + return &changeHandler{api: api, actor: actor, proveHdlr: p, submitHdlr: s} +} + +func (ch *changeHandler) start() { + go ch.proveHdlr.run() + go ch.submitHdlr.run() +} + +func (ch *changeHandler) update(ctx context.Context, revert *types.TipSet, advance *types.TipSet) error { + // Get the current deadline period + di, err := ch.api.StateMinerProvingDeadline(ctx, ch.actor, advance.Key()) + if err != nil { + return err + } + + if !di.PeriodStarted() { + return nil // not proving anything yet + } + + hc := &headChange{ + ctx: ctx, + revert: revert, + advance: advance, + di: di, + } + + select { + case ch.proveHdlr.hcs <- hc: + case <-ch.proveHdlr.shutdownCtx.Done(): + case <-ctx.Done(): + } + + select { + case ch.submitHdlr.hcs <- hc: + case <-ch.submitHdlr.shutdownCtx.Done(): + case <-ctx.Done(): + } + + return nil +} + +func (ch *changeHandler) shutdown() { + ch.proveHdlr.shutdown() + ch.submitHdlr.shutdown() +} + +func (ch *changeHandler) currentTSDI() (*types.TipSet, *dline.Info) { + return ch.submitHdlr.currentTSDI() +} + +// postsCache keeps a cache of PoSTs for each proving window +type postsCache struct { + added chan *postInfo + lk sync.RWMutex + cache map[abi.ChainEpoch][]miner.SubmitWindowedPoStParams +} + +func newPostsCache() *postsCache { + return &postsCache{ + added: make(chan *postInfo, 16), + cache: make(map[abi.ChainEpoch][]miner.SubmitWindowedPoStParams), + } +} + +func (c *postsCache) add(di *dline.Info, posts []miner.SubmitWindowedPoStParams) { + c.lk.Lock() + defer c.lk.Unlock() + + // TODO: clear cache entries older than chain finality + c.cache[di.Open] = posts + + c.added <- &postInfo{ + di: di, + posts: posts, + } +} + +func (c *postsCache) get(di *dline.Info) ([]miner.SubmitWindowedPoStParams, bool) { + c.lk.RLock() + defer c.lk.RUnlock() + + 
posts, ok := c.cache[di.Open] + return posts, ok +} + +type headChange struct { + ctx context.Context + revert *types.TipSet + advance *types.TipSet + di *dline.Info +} + +type currentPost struct { + di *dline.Info + abort context.CancelFunc +} + +type postResult struct { + ts *types.TipSet + currPost *currentPost + posts []miner.SubmitWindowedPoStParams + err error +} + +// proveHandler generates proofs +type proveHandler struct { + api changeHandlerAPI + posts *postsCache + + postResults chan *postResult + hcs chan *headChange + + current *currentPost + + shutdownCtx context.Context + shutdown context.CancelFunc + + // Used for testing + processedHeadChanges chan *headChange + processedPostResults chan *postResult +} + +func newProver( + api changeHandlerAPI, + posts *postsCache, +) *proveHandler { + ctx, cancel := context.WithCancel(context.Background()) + return &proveHandler{ + api: api, + posts: posts, + postResults: make(chan *postResult), + hcs: make(chan *headChange), + shutdownCtx: ctx, + shutdown: cancel, + } +} + +func (p *proveHandler) run() { + // Abort proving on shutdown + defer func() { + if p.current != nil { + p.current.abort() + } + }() + + for p.shutdownCtx.Err() == nil { + select { + case <-p.shutdownCtx.Done(): + return + + case hc := <-p.hcs: + // Head changed + p.processHeadChange(hc.ctx, hc.advance, hc.di) + if p.processedHeadChanges != nil { + p.processedHeadChanges <- hc + } + + case res := <-p.postResults: + // Proof generation complete + p.processPostResult(res) + if p.processedPostResults != nil { + p.processedPostResults <- res + } + } + } +} + +func (p *proveHandler) processHeadChange(ctx context.Context, newTS *types.TipSet, di *dline.Info) { + // If the post window has expired, abort the current proof + if p.current != nil && newTS.Height() >= p.current.di.Close { + // Cancel the context on the current proof + p.current.abort() + + // Clear out the reference to the proof so that we can immediately + // start generating a new proof, without having to worry about state + // getting clobbered when the abort completes + p.current = nil + } + + // Only generate one proof at a time + if p.current != nil { + return + } + + // If the proof for the current post window has been generated, check the + // next post window + _, complete := p.posts.get(di) + for complete { + di = nextDeadline(di) + _, complete = p.posts.get(di) + } + + // Check if the chain is above the Challenge height for the post window + if newTS.Height() < di.Challenge { + return + } + + p.current = ¤tPost{di: di} + curr := p.current + p.current.abort = p.api.startGeneratePoST(ctx, newTS, di, func(posts []miner.SubmitWindowedPoStParams, err error) { + p.postResults <- &postResult{ts: newTS, currPost: curr, posts: posts, err: err} + }) +} + +func (p *proveHandler) processPostResult(res *postResult) { + di := res.currPost.di + if res.err != nil { + // Proving failed so inform the API + p.api.failPost(res.err, res.ts, di) + log.Warnf("Aborted window post Proving (Deadline: %+v)", di) + p.api.onAbort(res.ts, di) + + // Check if the current post has already been aborted + if p.current == res.currPost { + // If the current post was not already aborted, setting it to nil + // marks it as complete so that a new post can be started + p.current = nil + } + return + } + + // Completed processing this proving window + p.current = nil + + // Add the proofs to the cache + p.posts.add(di, res.posts) +} + +type submitResult struct { + pw *postWindow + err error +} + +type SubmitState string + +const ( + 
SubmitStateStart SubmitState = "SubmitStateStart" + SubmitStateSubmitting SubmitState = "SubmitStateSubmitting" + SubmitStateComplete SubmitState = "SubmitStateComplete" +) + +type postWindow struct { + ts *types.TipSet + di *dline.Info + submitState SubmitState + abort context.CancelFunc +} + +type postInfo struct { + di *dline.Info + posts []miner.SubmitWindowedPoStParams +} + +// submitHandler submits proofs on-chain +type submitHandler struct { + api changeHandlerAPI + posts *postsCache + + submitResults chan *submitResult + hcs chan *headChange + + postWindows map[abi.ChainEpoch]*postWindow + getPostWindowReqs chan *getPWReq + + shutdownCtx context.Context + shutdown context.CancelFunc + + currentCtx context.Context + currentTS *types.TipSet + currentDI *dline.Info + getTSDIReq chan chan *tsdi + + // Used for testing + processedHeadChanges chan *headChange + processedSubmitResults chan *submitResult + processedPostReady chan *postInfo +} + +func newSubmitter( + api changeHandlerAPI, + posts *postsCache, +) *submitHandler { + ctx, cancel := context.WithCancel(context.Background()) + return &submitHandler{ + api: api, + posts: posts, + submitResults: make(chan *submitResult), + hcs: make(chan *headChange), + postWindows: make(map[abi.ChainEpoch]*postWindow), + getPostWindowReqs: make(chan *getPWReq), + getTSDIReq: make(chan chan *tsdi), + shutdownCtx: ctx, + shutdown: cancel, + } +} + +func (s *submitHandler) run() { + // On shutdown, abort in-progress submits + defer func() { + for _, pw := range s.postWindows { + if pw.abort != nil { + pw.abort() + } + } + }() + + for s.shutdownCtx.Err() == nil { + select { + case <-s.shutdownCtx.Done(): + return + + case hc := <-s.hcs: + // Head change + s.processHeadChange(hc.ctx, hc.revert, hc.advance, hc.di) + if s.processedHeadChanges != nil { + s.processedHeadChanges <- hc + } + + case pi := <-s.posts.added: + // Proof generated + s.processPostReady(pi) + if s.processedPostReady != nil { + s.processedPostReady <- pi + } + + case res := <-s.submitResults: + // Submit complete + s.processSubmitResult(res) + if s.processedSubmitResults != nil { + s.processedSubmitResults <- res + } + + case pwreq := <-s.getPostWindowReqs: + // used by getPostWindow() to sync with run loop + pwreq.out <- s.postWindows[pwreq.di.Open] + + case out := <-s.getTSDIReq: + // used by currentTSDI() to sync with run loop + out <- &tsdi{ts: s.currentTS, di: s.currentDI} + } + } +} + +// processHeadChange is called when the chain head changes +func (s *submitHandler) processHeadChange(ctx context.Context, revert *types.TipSet, advance *types.TipSet, di *dline.Info) { + s.currentCtx = ctx + s.currentTS = advance + s.currentDI = di + + // Start tracking the current post window if we're not already + // TODO: clear post windows older than chain finality + if _, ok := s.postWindows[di.Open]; !ok { + s.postWindows[di.Open] = &postWindow{ + di: di, + ts: advance, + submitState: SubmitStateStart, + } + } + + // Apply the change to all post windows + for _, pw := range s.postWindows { + s.processHeadChangeForPW(ctx, revert, advance, pw) + } +} + +func (s *submitHandler) processHeadChangeForPW(ctx context.Context, revert *types.TipSet, advance *types.TipSet, pw *postWindow) { + revertedToPrevDL := revert != nil && revert.Height() < pw.di.Open + expired := advance.Height() >= pw.di.Close + + // If the chain was reverted back to the previous deadline, or if the post + // window has expired, abort submit + if pw.submitState == SubmitStateSubmitting && (revertedToPrevDL || expired) { + // 
Replace the aborted postWindow with a new one so that we can + // submit again at any time without the state getting clobbered + // when the abort completes + abort := pw.abort + if abort != nil { + pw = &postWindow{ + di: pw.di, + ts: advance, + submitState: SubmitStateStart, + } + s.postWindows[pw.di.Open] = pw + + // Abort the current submit + abort() + } + } else if pw.submitState == SubmitStateComplete && revertedToPrevDL { + // If submit for this deadline has completed, but the chain was + // reverted back to the previous deadline, reset the submit state to the + // starting state, so that it can be resubmitted + pw.submitState = SubmitStateStart + } + + // Submit the proof to chain if the proof has been generated and the chain + // height is above confidence + s.submitIfReady(ctx, advance, pw) +} + +// processPostReady is called when a proof generation completes +func (s *submitHandler) processPostReady(pi *postInfo) { + pw, ok := s.postWindows[pi.di.Open] + if ok { + s.submitIfReady(s.currentCtx, s.currentTS, pw) + } +} + +// submitIfReady submits a proof if the chain is high enough and the proof +// has been generated for this deadline +func (s *submitHandler) submitIfReady(ctx context.Context, advance *types.TipSet, pw *postWindow) { + // If the window has expired, there's nothing more to do. + if advance.Height() >= pw.di.Close { + return + } + + // Check if we're already submitting, or already completed submit + if pw.submitState != SubmitStateStart { + return + } + + // Check if we've reached the confidence height to submit + if advance.Height() < pw.di.Open+SubmitConfidence { + return + } + + // Check if the proofs have been generated for this deadline + posts, ok := s.posts.get(pw.di) + if !ok { + return + } + + // If there was nothing to prove, move straight to the complete state + if len(posts) == 0 { + pw.submitState = SubmitStateComplete + return + } + + // Start submitting post + pw.submitState = SubmitStateSubmitting + pw.abort = s.api.startSubmitPoST(ctx, advance, pw.di, posts, func(err error) { + s.submitResults <- &submitResult{pw: pw, err: err} + }) +} + +// processSubmitResult is called with the response to a submit +func (s *submitHandler) processSubmitResult(res *submitResult) { + if res.err != nil { + // Submit failed so inform the API and go back to the start state + s.api.failPost(res.err, res.pw.ts, res.pw.di) + log.Warnf("Aborted window post Submitting (Deadline: %+v)", res.pw.di) + s.api.onAbort(res.pw.ts, res.pw.di) + + res.pw.submitState = SubmitStateStart + return + } + + // Submit succeeded so move to complete state + res.pw.submitState = SubmitStateComplete +} + +type tsdi struct { + ts *types.TipSet + di *dline.Info +} + +func (s *submitHandler) currentTSDI() (*types.TipSet, *dline.Info) { + out := make(chan *tsdi) + s.getTSDIReq <- out + res := <-out + return res.ts, res.di +} + +type getPWReq struct { + di *dline.Info + out chan *postWindow +} + +func (s *submitHandler) getPostWindow(di *dline.Info) *postWindow { + out := make(chan *postWindow) + s.getPostWindowReqs <- &getPWReq{di: di, out: out} + return <-out +} + +// nextDeadline gets deadline info for the subsequent deadline +func nextDeadline(currentDeadline *dline.Info) *dline.Info { + periodStart := currentDeadline.PeriodStart + newDeadline := currentDeadline.Index + 1 + if newDeadline == miner.WPoStPeriodDeadlines { + newDeadline = 0 + periodStart = periodStart + miner.WPoStProvingPeriod + } + + return miner.NewDeadlineInfo(periodStart, newDeadline, currentDeadline.CurrentEpoch) +} diff 
--git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go new file mode 100644 index 000000000..d2a4779e6 --- /dev/null +++ b/storage/wdpost_changehandler_test.go @@ -0,0 +1,1173 @@ +package storage + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + tutils "github.com/filecoin-project/specs-actors/support/testing" + + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +var dummyCid cid.Cid + +func init() { + dummyCid, _ = cid.Parse("bafkqaaa") +} + +type proveRes struct { + posts []miner.SubmitWindowedPoStParams + err error +} + +type postStatus string + +const ( + postStatusStart postStatus = "postStatusStart" + postStatusProving postStatus = "postStatusProving" + postStatusComplete postStatus = "postStatusComplete" +) + +type mockAPI struct { + ch *changeHandler + deadline *dline.Info + proveResult chan *proveRes + submitResult chan error + onStateChange chan struct{} + + tsLock sync.RWMutex + ts map[types.TipSetKey]*types.TipSet + + abortCalledLock sync.RWMutex + abortCalled bool + + statesLk sync.RWMutex + postStates map[abi.ChainEpoch]postStatus +} + +func newMockAPI() *mockAPI { + return &mockAPI{ + proveResult: make(chan *proveRes), + onStateChange: make(chan struct{}), + submitResult: make(chan error), + postStates: make(map[abi.ChainEpoch]postStatus), + ts: make(map[types.TipSetKey]*types.TipSet), + } +} + +func (m *mockAPI) makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { + m.tsLock.Lock() + defer m.tsLock.Unlock() + + ts := makeTs(t, h) + m.ts[ts.Key()] = ts + return ts +} + +func (m *mockAPI) setDeadline(di *dline.Info) { + m.tsLock.Lock() + defer m.tsLock.Unlock() + + m.deadline = di +} + +func (m *mockAPI) getDeadline(currentEpoch abi.ChainEpoch) *dline.Info { + close := miner.WPoStChallengeWindow - 1 + dlIdx := uint64(0) + for close < currentEpoch { + close += miner.WPoStChallengeWindow + dlIdx++ + } + return miner.NewDeadlineInfo(0, dlIdx, currentEpoch) +} + +func (m *mockAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) { + m.tsLock.RLock() + defer m.tsLock.RUnlock() + + ts, ok := m.ts[key] + if !ok { + panic(fmt.Sprintf("unexpected tipset key %s", key)) + } + + if m.deadline != nil { + m.deadline.CurrentEpoch = ts.Height() + return m.deadline, nil + } + + return m.getDeadline(ts.Height()), nil +} + +func (m *mockAPI) startGeneratePoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, + completeGeneratePoST CompleteGeneratePoSTCb, +) context.CancelFunc { + ctx, cancel := context.WithCancel(ctx) + + m.statesLk.Lock() + defer m.statesLk.Unlock() + m.postStates[deadline.Open] = postStatusProving + + go func() { + defer cancel() + + select { + case psRes := <-m.proveResult: + m.statesLk.Lock() + { + if psRes.err == nil { + m.postStates[deadline.Open] = postStatusComplete + } else { + m.postStates[deadline.Open] = postStatusStart + } + } + m.statesLk.Unlock() + completeGeneratePoST(psRes.posts, psRes.err) + case <-ctx.Done(): + completeGeneratePoST(nil, ctx.Err()) + } + }() + + return cancel +} + +func (m *mockAPI) getPostStatus(di *dline.Info) postStatus { + m.statesLk.RLock() + defer m.statesLk.RUnlock() + + 
status, ok := m.postStates[di.Open] + if ok { + return status + } + return postStatusStart +} + +func (m *mockAPI) startSubmitPoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, + posts []miner.SubmitWindowedPoStParams, + completeSubmitPoST CompleteSubmitPoSTCb, +) context.CancelFunc { + ctx, cancel := context.WithCancel(ctx) + + go func() { + defer cancel() + + select { + case err := <-m.submitResult: + completeSubmitPoST(err) + case <-ctx.Done(): + completeSubmitPoST(ctx.Err()) + } + }() + + return cancel +} + +func (m *mockAPI) onAbort(ts *types.TipSet, deadline *dline.Info) { + m.abortCalledLock.Lock() + defer m.abortCalledLock.Unlock() + m.abortCalled = true +} + +func (m *mockAPI) wasAbortCalled() bool { + m.abortCalledLock.RLock() + defer m.abortCalledLock.RUnlock() + return m.abortCalled +} + +func (m *mockAPI) failPost(err error, ts *types.TipSet, deadline *dline.Info) { +} + +func (m *mockAPI) setChangeHandler(ch *changeHandler) { + m.ch = ch +} + +// TestChangeHandlerBasic verifies we can generate a proof and submit it +func TestChangeHandlerBasic(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Move to the correct height to submit the proof + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Send a response to the submit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) +} + +// TestChangeHandlerFromProvingToSubmittingNoHeadChange tests that when the +// chain is already advanced past the confidence interval, we should move from +// proving to submitting without a head change in between. 
+func TestChangeHandlerFromProvingToSubmittingNoHeadChange(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Monitor submit handler's processing of incoming postInfo + s.ch.submitHdlr.processedPostReady = make(chan *postInfo) + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Trigger a head change that advances the chain beyond the submit + // confidence + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should be no change to state yet + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + di = mock.getDeadline(currentEpoch) + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Should move directly to submitting state with no further head changes + <-s.ch.submitHdlr.processedPostReady + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) +} + +// TestChangeHandlerFromProvingEmptyProofsToComplete tests that when there are no +// proofs generated we should not submit anything to chain but submit state +// should move to completed +func TestChangeHandlerFromProvingEmptyProofsToComplete(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Monitor submit handler's processing of incoming postInfo + s.ch.submitHdlr.processedPostReady = make(chan *postInfo) + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Trigger a head change that advances the chain beyond the submit + // confidence + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should be no change to state yet + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs with an empty proofs array + posts := []miner.SubmitWindowedPoStParams{} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + di = mock.getDeadline(currentEpoch) + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Should move directly to submitting complete state + <-s.ch.submitHdlr.processedPostReady + require.Equal(t, SubmitStateComplete, s.submitState(di)) +} + +// TestChangeHandlerDontStartUntilProvingPeriod tests that the handler +// ignores 
updates until the proving period has been reached. +func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + periodStart := miner.WPoStProvingPeriod + dlIdx := uint64(1) + currentEpoch := abi.ChainEpoch(10) + di := miner.NewDeadlineInfo(periodStart, dlIdx, currentEpoch) + mock.setDeadline(di) + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + go triggerHeadAdvance(t, s, currentEpoch) + + // Nothing should happen because the proving period has not started + select { + case <-s.ch.proveHdlr.processedHeadChanges: + require.Fail(t, "unexpected prove change") + case <-s.ch.submitHdlr.processedHeadChanges: + require.Fail(t, "unexpected submit change") + case <-time.After(10 * time.Millisecond): + } + + // Advance the head to the next proving period's first epoch + currentEpoch = periodStart + miner.WPoStChallengeWindow + di = miner.NewDeadlineInfo(periodStart, dlIdx, currentEpoch) + mock.setDeadline(di) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) +} + +// TestChangeHandlerStartProvingNextDeadline verifies that the proof handler +// starts proving the next deadline after the current one +func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Trigger a head change that advances the chain beyond the submit + // confidence + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should be no change to state yet + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + di = mock.getDeadline(currentEpoch) + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Trigger head change that advances the chain to the Challenge epoch for + // the next deadline + go func() { + di = nextDeadline(di) + currentEpoch = di.Challenge + triggerHeadAdvance(t, s, currentEpoch) + }() + + // Should start generating next window's proof + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) +} + +// TestChangeHandlerProvingRounds verifies we can generate several rounds of +// proofs as the chain head advances +func TestChangeHandlerProvingRounds(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + completeProofIndex := abi.ChainEpoch(10) + for currentEpoch := abi.ChainEpoch(1); currentEpoch < miner.WPoStChallengeWindow*5; currentEpoch++ { + // Trigger a head change + di := mock.getDeadline(currentEpoch) + go triggerHeadAdvance(t, s, currentEpoch) + + // Wait for prover to process head change + <-s.ch.proveHdlr.processedHeadChanges + + completeProofEpoch := di.Open + completeProofIndex + next := nextDeadline(di) + //fmt.Println("epoch", currentEpoch, s.mock.getPostStatus(di), "next", s.mock.getPostStatus(next)) + 
if currentEpoch >= next.Challenge { + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + // At the next deadline's challenge epoch, should start proving + // for that epoch + require.Equal(t, postStatusProving, s.mock.getPostStatus(next)) + } else if currentEpoch > completeProofEpoch { + // After proving for the round is complete, should be in complete state + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + require.Equal(t, postStatusStart, s.mock.getPostStatus(next)) + } else { + // Until proving completes, should be in the proving state + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + require.Equal(t, postStatusStart, s.mock.getPostStatus(next)) + } + + // Wait for submitter to process head change + <-s.ch.submitHdlr.processedHeadChanges + + completeSubmitEpoch := completeProofEpoch + 1 + //fmt.Println("epoch", currentEpoch, s.submitState(di)) + if currentEpoch > completeSubmitEpoch { + require.Equal(t, SubmitStateComplete, s.submitState(di)) + } else if currentEpoch > completeProofEpoch { + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + } else { + require.Equal(t, SubmitStateStart, s.submitState(di)) + } + + if currentEpoch == completeProofEpoch { + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + } + + if currentEpoch == completeSubmitEpoch { + // Send a response to the submit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) + } + } +} + +// TestChangeHandlerProvingErrorRecovery verifies that the proof handler +// recovers correctly from an error +func TestChangeHandlerProvingErrorRecovery(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Send an error response to the call to generate proofs + mock.proveResult <- &proveRes{err: fmt.Errorf("err")} + + // Should abort and then move to start state + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusStart, s.mock.getPostStatus(di)) + + // Trigger a head change + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Send a success response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) +} + +// TestChangeHandlerSubmitErrorRecovery verifies that the submit handler +// recovers correctly from an error +func TestChangeHandlerSubmitErrorRecovery(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + 
<-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Move to the correct height to submit the proof + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Read from prover incoming channel (so as not to block) + <-s.ch.proveHdlr.processedHeadChanges + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Send an error response to the call to submit + mock.submitResult <- fmt.Errorf("err") + + // Should abort and then move back to the start state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateStart, s.submitState(di)) + require.True(t, mock.wasAbortCalled()) + + // Trigger another head change + go triggerHeadAdvance(t, s, currentEpoch) + + // Read from prover incoming channel (so as not to block) + <-s.ch.proveHdlr.processedHeadChanges + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Send a response to the submit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) +} + +// TestChangeHandlerProveExpiry verifies that the prove handler +// behaves correctly on expiry +func TestChangeHandlerProveExpiry(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Move to a height that expires the current proof + currentEpoch = miner.WPoStChallengeWindow + di = mock.getDeadline(currentEpoch) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should trigger an abort and start proving for the new deadline + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + <-s.ch.proveHdlr.processedPostResults + require.True(t, mock.wasAbortCalled()) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) +} + +// TestChangeHandlerSubmitExpiry verifies that the submit handler +// behaves correctly on expiry +func TestChangeHandlerSubmitExpiry(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Ignore prove handler head change processing for this test + s.ch.proveHdlr.processedHeadChanges = nil + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := abi.ChainEpoch(1) 
+ go triggerHeadAdvance(t, s, currentEpoch) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Move to the correct height to submit the proof + currentEpoch = 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Move to a height that expires the submit + currentEpoch = miner.WPoStChallengeWindow + di = mock.getDeadline(currentEpoch) + go triggerHeadAdvance(t, s, currentEpoch) + + // Should trigger an abort and move back to start state + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedSubmitResults + require.True(t, mock.wasAbortCalled()) + }() + + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateStart, s.submitState(di)) + }() + + wg.Wait() +} + +// TestChangeHandlerProveRevert verifies that the prove handler +// behaves correctly on revert +func TestChangeHandlerProveRevert(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := miner.WPoStChallengeWindow + go triggerHeadAdvance(t, s, currentEpoch) + + // Should start proving + <-s.ch.proveHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Trigger a revert to the previous epoch + revertEpoch := di.Open - 5 + go triggerHeadChange(t, s, revertEpoch, currentEpoch) + + // Should be no change + <-s.ch.proveHdlr.processedHeadChanges + require.Equal(t, postStatusProving, s.mock.getPostStatus(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + require.False(t, mock.wasAbortCalled()) +} + +// TestChangeHandlerSubmittingRevert verifies that the submit handler +// behaves correctly when there's a revert from the submitting state +func TestChangeHandlerSubmittingRevert(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Ignore prove handler head change processing for this test + s.ch.proveHdlr.processedHeadChanges = nil + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := miner.WPoStChallengeWindow + go triggerHeadAdvance(t, s, currentEpoch) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + 
// Move to the correct height to submit the proof + currentEpoch = currentEpoch + 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Trigger a revert to the previous epoch + revertEpoch := di.Open - 5 + go triggerHeadChange(t, s, revertEpoch, currentEpoch) + + var wg sync.WaitGroup + wg.Add(2) + + // Should trigger an abort + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedSubmitResults + require.True(t, mock.wasAbortCalled()) + }() + + // Should resubmit current epoch + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + }() + + wg.Wait() + + // Send a response to the resubmit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) +} + +// TestChangeHandlerSubmitCompleteRevert verifies that the submit handler +// behaves correctly when there's a revert from the submit complete state +func TestChangeHandlerSubmitCompleteRevert(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Ignore prove handler head change processing for this test + s.ch.proveHdlr.processedHeadChanges = nil + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := miner.WPoStChallengeWindow + go triggerHeadAdvance(t, s, currentEpoch) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + di := mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateStart, s.submitState(di)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: di.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(di)) + + // Move to the correct height to submit the proof + currentEpoch = currentEpoch + 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state + <-s.ch.submitHdlr.processedHeadChanges + di = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Send a response to the resubmit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) + + // Trigger a revert to the previous epoch + revertEpoch := di.Open - 5 + go triggerHeadChange(t, s, revertEpoch, currentEpoch) + + // Should resubmit current epoch + <-s.ch.submitHdlr.processedHeadChanges + require.Equal(t, SubmitStateSubmitting, s.submitState(di)) + + // Send a response to the resubmit call + mock.submitResult <- nil + + // Should move to the complete state + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(di)) +} + +// TestChangeHandlerSubmitRevertTwoEpochs verifies that the submit handler +// behaves correctly when the revert is two epochs deep +func TestChangeHandlerSubmitRevertTwoEpochs(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Ignore prove handler head change processing for this test + s.ch.proveHdlr.processedHeadChanges = nil + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := 
miner.WPoStChallengeWindow + go triggerHeadAdvance(t, s, currentEpoch) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + diE1 := mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateStart, s.submitState(diE1)) + + // Send a response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: diE1.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE1)) + + // Move to the challenge epoch for the next deadline + diE2 := nextDeadline(diE1) + currentEpoch = diE2.Challenge + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state for epoch 1 + <-s.ch.submitHdlr.processedHeadChanges + diE1 = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(diE1)) + + // Send a response to the submit call for epoch 1 + mock.submitResult <- nil + + // Should move to the complete state for epoch 1 + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(diE1)) + + // Should start proving epoch 2 + // Send a response to the call to generate proofs + postsE2 := []miner.SubmitWindowedPoStParams{{Deadline: diE2.Index}} + mock.proveResult <- &proveRes{posts: postsE2} + + // Should move to proving complete for epoch 2 + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE2)) + + // Move to the correct height to submit the proof for epoch 2 + currentEpoch = diE2.Open + 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state for epoch 2 + <-s.ch.submitHdlr.processedHeadChanges + diE2 = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(diE2)) + + // Trigger a revert through two epochs (from epoch 2 to epoch 0) + revertEpoch := diE1.Open - 5 + go triggerHeadChange(t, s, revertEpoch, currentEpoch) + + var wg sync.WaitGroup + wg.Add(2) + + // Should trigger an abort + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedSubmitResults + require.True(t, mock.wasAbortCalled()) + }() + + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedHeadChanges + + // Should reset epoch 1 (that is expired) to start state + require.Equal(t, SubmitStateStart, s.submitState(diE1)) + // Should resubmit epoch 2 + require.Equal(t, SubmitStateSubmitting, s.submitState(diE2)) + }() + + wg.Wait() + + // Send a response to the resubmit call for epoch 2 + mock.submitResult <- nil + + // Should move to the complete state for epoch 2 + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(diE2)) +} + +// TestChangeHandlerSubmitRevertAdvanceLess verifies that the submit handler +// behaves correctly when the revert is two epochs deep and the advance is +// to a lower height than before +func TestChangeHandlerSubmitRevertAdvanceLess(t *testing.T) { + s := makeScaffolding(t) + mock := s.mock + + // Ignore prove handler head change processing for this test + s.ch.proveHdlr.processedHeadChanges = nil + + defer s.ch.shutdown() + s.ch.start() + + // Trigger a head change + currentEpoch := miner.WPoStChallengeWindow + go triggerHeadAdvance(t, s, currentEpoch) + + // Submitter doesn't have anything to do yet + <-s.ch.submitHdlr.processedHeadChanges + diE1 := mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateStart, s.submitState(diE1)) + + // Send a 
response to the call to generate proofs + posts := []miner.SubmitWindowedPoStParams{{Deadline: diE1.Index}} + mock.proveResult <- &proveRes{posts: posts} + + // Should move to proving complete + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE1)) + + // Move to the challenge epoch for the next deadline + diE2 := nextDeadline(diE1) + currentEpoch = diE2.Challenge + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state for epoch 1 + <-s.ch.submitHdlr.processedHeadChanges + diE1 = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(diE1)) + + // Send a response to the submit call for epoch 1 + mock.submitResult <- nil + + // Should move to the complete state for epoch 1 + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(diE1)) + + // Should start proving epoch 2 + // Send a response to the call to generate proofs + postsE2 := []miner.SubmitWindowedPoStParams{{Deadline: diE2.Index}} + mock.proveResult <- &proveRes{posts: postsE2} + + // Should move to proving complete for epoch 2 + <-s.ch.proveHdlr.processedPostResults + require.Equal(t, postStatusComplete, s.mock.getPostStatus(diE2)) + + // Move to the correct height to submit the proof for epoch 2 + currentEpoch = diE2.Open + 1 + SubmitConfidence + go triggerHeadAdvance(t, s, currentEpoch) + + // Should move to submitting state for epoch 2 + <-s.ch.submitHdlr.processedHeadChanges + diE2 = mock.getDeadline(currentEpoch) + require.Equal(t, SubmitStateSubmitting, s.submitState(diE2)) + + // Trigger a revert through two epochs (from epoch 2 to epoch 0) + // then advance to the previous epoch (to epoch 1) + revertEpoch := diE1.Open - 5 + currentEpoch = diE2.Open - 1 + go triggerHeadChange(t, s, revertEpoch, currentEpoch) + + var wg sync.WaitGroup + wg.Add(2) + + // Should trigger an abort + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedSubmitResults + require.True(t, mock.wasAbortCalled()) + }() + + go func() { + defer wg.Done() + + <-s.ch.submitHdlr.processedHeadChanges + + // Should resubmit epoch 1 + require.Equal(t, SubmitStateSubmitting, s.submitState(diE1)) + // Should reset epoch 2 to start state + require.Equal(t, SubmitStateStart, s.submitState(diE2)) + }() + + wg.Wait() + + // Send a response to the resubmit call for epoch 1 + mock.submitResult <- nil + + // Should move to the complete state for epoch 1 + <-s.ch.submitHdlr.processedSubmitResults + require.Equal(t, SubmitStateComplete, s.submitState(diE1)) +} + +type smScaffolding struct { + ctx context.Context + mock *mockAPI + ch *changeHandler +} + +func makeScaffolding(t *testing.T) *smScaffolding { + ctx := context.Background() + actor := tutils.NewActorAddr(t, "actor") + mock := newMockAPI() + ch := newChangeHandler(mock, actor) + mock.setChangeHandler(ch) + + ch.proveHdlr.processedHeadChanges = make(chan *headChange) + ch.proveHdlr.processedPostResults = make(chan *postResult) + + ch.submitHdlr.processedHeadChanges = make(chan *headChange) + ch.submitHdlr.processedSubmitResults = make(chan *submitResult) + + return &smScaffolding{ + ctx: ctx, + mock: mock, + ch: ch, + } +} + +func triggerHeadAdvance(t *testing.T, s *smScaffolding, height abi.ChainEpoch) { + ts := s.mock.makeTs(t, height) + err := s.ch.update(s.ctx, nil, ts) + require.NoError(t, err) +} + +func triggerHeadChange(t *testing.T, s *smScaffolding, revertHeight, advanceHeight abi.ChainEpoch) { + tsRev := s.mock.makeTs(t, revertHeight) + tsAdv := 
s.mock.makeTs(t, advanceHeight) + err := s.ch.update(s.ctx, tsRev, tsAdv) + require.NoError(t, err) +} + +func (s *smScaffolding) submitState(di *dline.Info) SubmitState { + return s.ch.submitHdlr.getPostWindow(di).submitState +} + +func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { + var parents []cid.Cid + msgcid := dummyCid + + a, _ := address.NewFromString("t00") + b, _ := address.NewFromString("t02") + var ts, err = types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: a, + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: dummyCid, + Messages: msgcid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + { + Height: h, + Miner: b, + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, + + ParentStateRoot: dummyCid, + Messages: msgcid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + }) + + require.NoError(t, err) + + return ts +} diff --git a/storage/wdpost_nextdl_test.go b/storage/wdpost_nextdl_test.go new file mode 100644 index 000000000..ad4b1fdeb --- /dev/null +++ b/storage/wdpost_nextdl_test.go @@ -0,0 +1,38 @@ +package storage + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +func TestNextDeadline(t *testing.T) { + periodStart := abi.ChainEpoch(0) + deadlineIdx := 0 + currentEpoch := abi.ChainEpoch(10) + + di := miner.NewDeadlineInfo(periodStart, uint64(deadlineIdx), currentEpoch) + require.EqualValues(t, 0, di.Index) + require.EqualValues(t, 0, di.PeriodStart) + require.EqualValues(t, -20, di.Challenge) + require.EqualValues(t, 0, di.Open) + require.EqualValues(t, 60, di.Close) + + for i := 1; i < 1+int(miner.WPoStPeriodDeadlines)*2; i++ { + di = nextDeadline(di) + deadlineIdx = i % int(miner.WPoStPeriodDeadlines) + expPeriodStart := int(miner.WPoStProvingPeriod) * (i / int(miner.WPoStPeriodDeadlines)) + expOpen := expPeriodStart + deadlineIdx*int(miner.WPoStChallengeWindow) + expClose := expOpen + int(miner.WPoStChallengeWindow) + expChallenge := expOpen - int(miner.WPoStChallengeLookback) + //fmt.Printf("%d: %d@%d %d-%d (%d)\n", i, expPeriodStart, deadlineIdx, expOpen, expClose, expChallenge) + require.EqualValues(t, deadlineIdx, di.Index) + require.EqualValues(t, expPeriodStart, di.PeriodStart) + require.EqualValues(t, expOpen, di.Open) + require.EqualValues(t, expClose, di.Close) + require.EqualValues(t, expChallenge, di.Challenge) + } +} diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 9a497f879..35fdfc6d1 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -29,15 +29,21 @@ import ( "github.com/filecoin-project/lotus/journal" ) -func (s *WindowPoStScheduler) failPost(err error, deadline *dline.Info) { +func (s *WindowPoStScheduler) failPost(err error, ts *types.TipSet, deadline *dline.Info) { journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { + c := evtCommon{Error: err} + if ts != nil { + c.Deadline = deadline + c.Height = ts.Height() + c.TipSet = ts.Cids() + } return WdPoStSchedulerEvt{ - evtCommon: s.getEvtCommon(err), + evtCommon: c, State: SchedulerStateFaulted, } }) - log.Errorf("TODO") + log.Errorf("Got err %w - TODO handle errors", err) /*s.failLk.Lock() if 
eps > s.failed { s.failed = eps @@ -45,67 +51,134 @@ func (s *WindowPoStScheduler) failPost(err error, deadline *dline.Info) { s.failLk.Unlock()*/ } -func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *dline.Info, ts *types.TipSet) { - ctx, abort := context.WithCancel(ctx) - - s.abort = abort - s.activeDeadline = deadline - - journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { - return WdPoStSchedulerEvt{ - evtCommon: s.getEvtCommon(nil), - State: SchedulerStateStarted, +// recordProofsEvent records a successful proofs_processed event in the +// journal, even if it was a noop (no partitions). +func (s *WindowPoStScheduler) recordProofsEvent(partitions []miner.PoStPartition, mcid cid.Cid) { + journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStProofs], func() interface{} { + return &WdPoStProofsProcessedEvt{ + evtCommon: s.getEvtCommon(nil), + Partitions: partitions, + MessageCID: mcid, } }) +} +// startGeneratePoST kicks off the process of generating a PoST +func (s *WindowPoStScheduler) startGeneratePoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, + completeGeneratePoST CompleteGeneratePoSTCb, +) context.CancelFunc { + ctx, abort := context.WithCancel(ctx) go func() { defer abort() - ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.doPost") - defer span.End() - - // recordProofsEvent records a successful proofs_processed event in the - // journal, even if it was a noop (no partitions). - recordProofsEvent := func(partitions []miner.PoStPartition, mcid cid.Cid) { - journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStProofs], func() interface{} { - return &WdPoStProofsProcessedEvt{ - evtCommon: s.getEvtCommon(nil), - Partitions: partitions, - MessageCID: mcid, - } - }) - } - - posts, err := s.runPost(ctx, *deadline, ts) - if err != nil { - log.Errorf("run window post failed: %+v", err) - s.failPost(err, deadline) - return - } - - if len(posts) == 0 { - recordProofsEvent(nil, cid.Undef) - return - } - - for i := range posts { - post := &posts[i] - sm, err := s.submitPost(ctx, post) - if err != nil { - log.Errorf("submit window post failed: %+v", err) - s.failPost(err, deadline) - } else { - recordProofsEvent(post.Partitions, sm.Cid()) - } - } - journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { return WdPoStSchedulerEvt{ evtCommon: s.getEvtCommon(nil), - State: SchedulerStateSucceeded, + State: SchedulerStateStarted, } }) + + posts, err := s.runGeneratePoST(ctx, ts, deadline) + completeGeneratePoST(posts, err) }() + + return abort +} + +// runGeneratePoST generates the PoST +func (s *WindowPoStScheduler) runGeneratePoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, +) ([]miner.SubmitWindowedPoStParams, error) { + ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.generatePoST") + defer span.End() + + posts, err := s.runPost(ctx, *deadline, ts) + if err != nil { + log.Errorf("runPost failed: %+v", err) + return nil, err + } + + if len(posts) == 0 { + s.recordProofsEvent(nil, cid.Undef) + } + + return posts, nil +} + +// startSubmitPoST kicks of the process of submitting PoST +func (s *WindowPoStScheduler) startSubmitPoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, + posts []miner.SubmitWindowedPoStParams, + completeSubmitPoST CompleteSubmitPoSTCb, +) context.CancelFunc { + + ctx, abort := context.WithCancel(ctx) + go func() { + defer abort() + + err := s.runSubmitPoST(ctx, ts, deadline, posts) + if err == nil { + 
journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { + return WdPoStSchedulerEvt{ + evtCommon: s.getEvtCommon(nil), + State: SchedulerStateSucceeded, + } + }) + } + completeSubmitPoST(err) + }() + + return abort +} + +// runSubmitPoST submits PoST +func (s *WindowPoStScheduler) runSubmitPoST( + ctx context.Context, + ts *types.TipSet, + deadline *dline.Info, + posts []miner.SubmitWindowedPoStParams, +) error { + if len(posts) == 0 { + return nil + } + + ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.submitPoST") + defer span.End() + + // Get randomness from tickets + commEpoch := deadline.Open + commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil) + if err != nil { + err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err) + log.Errorf("submitPost failed: %+v", err) + + return err + } + + var submitErr error + for i := range posts { + // Add randomness to PoST + post := &posts[i] + post.ChainCommitEpoch = commEpoch + post.ChainCommitRand = commRand + + // Submit PoST + sm, submitErr := s.submitPost(ctx, post) + if submitErr != nil { + log.Errorf("submit window post failed: %+v", submitErr) + } else { + s.recordProofsEvent(post.Partitions, sm.Cid()) + } + } + + return submitErr } func (s *WindowPoStScheduler) checkSectors(ctx context.Context, check bitfield.BitField) (bitfield.BitField, error) { @@ -392,7 +465,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes()) if err != nil { - return nil, xerrors.Errorf("failed to get chain randomness for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) + return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) } // Get the partitions for the given deadline @@ -536,19 +609,6 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty posts = append(posts, params) } - // Compute randomness after generating proofs so as to reduce the impact - // of chain reorgs (which change randomness) - commEpoch := di.Open - commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil) - if err != nil { - return nil, xerrors.Errorf("failed to get chain randomness for window post (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err) - } - - for i := range posts { - posts[i].ChainCommitEpoch = commEpoch - posts[i].ChainCommitRand = commRand - } - return posts, nil } @@ -589,6 +649,7 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition) ([][]a } batches = append(batches, partitions[i:end]) } + return batches, nil } diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 10be2fbcd..09b9aee5c 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" @@ -177,7 +178,10 @@ func TestWDPostDoPost(t *testing.T) { FaultDeclarationCutoff: miner0.FaultDeclarationCutoff, } ts := mockTipSet(t) - scheduler.doPost(ctx, di, ts) + + 
scheduler.startGeneratePoST(ctx, ts, di, func(posts []miner.SubmitWindowedPoStParams, err error) { + scheduler.startSubmitPoST(ctx, ts, di, posts, func(err error) {}) + }) // Read the window PoST messages for i := 0; i < expectedMsgCount; i++ { diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 7e60fd9ee..3a76a219f 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -22,8 +22,6 @@ import ( "go.opencensus.io/trace" ) -const StartConfidence = 4 // TODO: config - type WindowPoStScheduler struct { api storageMinerApi feeCfg config.MinerFeeConfig @@ -31,16 +29,11 @@ type WindowPoStScheduler struct { faultTracker sectorstorage.FaultTracker proofType abi.RegisteredPoStProof partitionSectors uint64 + ch *changeHandler actor address.Address worker address.Address - cur *types.TipSet - - // if a post is in progress, this indicates for which ElectionPeriodStart - activeDeadline *dline.Info - abort context.CancelFunc - evtTypes [4]journal.EventType // failed abi.ChainEpoch // eps @@ -77,16 +70,17 @@ func NewWindowedPoStScheduler(api storageMinerApi, fc config.MinerFeeConfig, sb }, nil } -func deadlineEquals(a, b *dline.Info) bool { - if a == nil || b == nil { - return b == a - } - - return a.PeriodStart == b.PeriodStart && a.Index == b.Index && a.Challenge == b.Challenge +type changeHandlerAPIImpl struct { + storageMinerApi + *WindowPoStScheduler } func (s *WindowPoStScheduler) Run(ctx context.Context) { - defer s.abortActivePoSt() + // Initialize change handler + chImpl := &changeHandlerAPIImpl{storageMinerApi: s.api, WindowPoStScheduler: s} + s.ch = newChangeHandler(chImpl, s.actor) + defer s.ch.shutdown() + s.ch.start() var notifs <-chan []*api.HeadChange var err error @@ -125,9 +119,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { continue } - if err := s.update(ctx, chg.Val); err != nil { - log.Errorf("%+v", err) - } + s.update(ctx, nil, chg.Val) gotCur = true continue @@ -135,7 +127,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { ctx, span := trace.StartSpan(ctx, "WindowPoStScheduler.headChange") - var lowest, highest *types.TipSet = s.cur, nil + var lowest, highest *types.TipSet = nil, nil for _, change := range changes { if change.Val == nil { @@ -149,12 +141,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { } } - if err := s.revert(ctx, lowest); err != nil { - log.Error("handling head reverts in window post sched: %+v", err) - } - if err := s.update(ctx, highest); err != nil { - log.Error("handling head updates in window post sched: %+v", err) - } + s.update(ctx, lowest, highest) span.End() case <-ctx.Done(): @@ -163,95 +150,40 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { } } -func (s *WindowPoStScheduler) revert(ctx context.Context, newLowest *types.TipSet) error { - if s.cur == newLowest { - return nil +func (s *WindowPoStScheduler) update(ctx context.Context, revert, apply *types.TipSet) { + if apply == nil { + log.Error("no new tipset in window post WindowPoStScheduler.update") + return } - s.cur = newLowest - - newDeadline, err := s.api.StateMinerProvingDeadline(ctx, s.actor, newLowest.Key()) + err := s.ch.update(ctx, revert, apply) if err != nil { - return err + log.Errorf("handling head updates in window post sched: %+v", err) } - - if !deadlineEquals(s.activeDeadline, newDeadline) { - s.abortActivePoSt() - } - - return nil } -func (s *WindowPoStScheduler) update(ctx context.Context, new *types.TipSet) error { - if new == nil { - return xerrors.Errorf("no new tipset in window post sched update") - 
} - - di, err := s.api.StateMinerProvingDeadline(ctx, s.actor, new.Key()) - if err != nil { - return err - } - - if deadlineEquals(s.activeDeadline, di) { - return nil // already working on this deadline - } - - if !di.PeriodStarted() { - return nil // not proving anything yet - } - - s.abortActivePoSt() - - // TODO: wait for di.Challenge here, will give us ~10min more to compute windowpost - // (Need to get correct deadline above, which is tricky) - - if di.Open+StartConfidence >= new.Height() { - log.Info("not starting window post yet, waiting for startconfidence", di.Open, di.Open+StartConfidence, new.Height()) - return nil - } - - /*s.failLk.Lock() - if s.failed > 0 { - s.failed = 0 - s.activeEPS = 0 - } - s.failLk.Unlock()*/ - log.Infof("at %d, do window post for P %d, dd %d", new.Height(), di.PeriodStart, di.Index) - - s.doPost(ctx, di, new) - - return nil +// onAbort is called when generating proofs or submitting proofs is aborted +func (s *WindowPoStScheduler) onAbort(ts *types.TipSet, deadline *dline.Info) { + journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { + c := evtCommon{} + if ts != nil { + c.Deadline = deadline + c.Height = ts.Height() + c.TipSet = ts.Cids() + } + return WdPoStSchedulerEvt{ + evtCommon: c, + State: SchedulerStateAborted, + } + }) } -func (s *WindowPoStScheduler) abortActivePoSt() { - if s.activeDeadline == nil { - return // noop - } - - if s.abort != nil { - s.abort() - - journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} { - return WdPoStSchedulerEvt{ - evtCommon: s.getEvtCommon(nil), - State: SchedulerStateAborted, - } - }) - - log.Warnf("Aborting window post (Deadline: %+v)", s.activeDeadline) - } - - s.activeDeadline = nil - s.abort = nil -} - -// getEvtCommon populates and returns common attributes from state, for a -// WdPoSt journal event. func (s *WindowPoStScheduler) getEvtCommon(err error) evtCommon { c := evtCommon{Error: err} - if s.cur != nil { - c.Deadline = s.activeDeadline - c.Height = s.cur.Height() - c.TipSet = s.cur.Cids() + currentTS, currentDeadline := s.ch.currentTSDI() + if currentTS != nil { + c.Deadline = currentDeadline + c.Height = currentTS.Height() + c.TipSet = currentTS.Cids() } return c } From e48c525053ada752041c4917a5c22d9610e8746d Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Thu, 24 Sep 2020 19:41:14 +0200 Subject: [PATCH 17/88] Re-add jaeger-tracing --- documentation/en/jaeger-tracing.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 documentation/en/jaeger-tracing.md diff --git a/documentation/en/jaeger-tracing.md b/documentation/en/jaeger-tracing.md new file mode 100644 index 000000000..bbe4d3052 --- /dev/null +++ b/documentation/en/jaeger-tracing.md @@ -0,0 +1,26 @@ +# Jaeger Tracing + +Lotus has tracing built into many of its internals. To view the traces, first download [Jaeger](https://www.jaegertracing.io/download/) (Choose the 'all-in-one' binary). Then run it somewhere, start up the lotus daemon, and open up localhost:16686 in your browser. + +## Open Census + +Lotus uses [OpenCensus](https://opencensus.io/) for tracing application flow. This generates spans through the execution of annotated code paths. + +Currently it is set up to use Jaeger, though other tracing backends should be fairly easy to swap in. + +## Running Locally + +To easily run and view tracing locally, first, install jaeger. 
The easiest way to do this is to [download the binaries](https://www.jaegertracing.io/download/) and then run the `jaeger-all-in-one` binary. This will start up jaeger, listen for spans on `localhost:6831`, and expose a web UI for viewing traces on `http://localhost:16686/`. + +Now, to start sending traces from Lotus to Jaeger, set the environment variable `LOTUS_JAEGER` to `localhost:6831`, and start the `lotus daemon`. + +Now, to view any generated traces, open up `http://localhost:16686/` in your browser. + +## Adding Spans + +To annotate a new codepath with spans, add the following lines to the top of the function you wish to trace: + +```go +ctx, span := trace.StartSpan(ctx, "put function name here") +defer span.End() +``` From 6db37b72a85bf056630d757f591d11c92f8c65c9 Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Thu, 24 Sep 2020 11:05:21 -0700 Subject: [PATCH 18/88] update ffi to code with blst fixes --- extern/filecoin-ffi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index f640612a1..57e38efe4 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit f640612a1a1f7a2dd8b3a49e1531db0aa0f63447 +Subproject commit 57e38efe4943f09d3127dcf6f0edd614e6acf68e From 306c098d304cb853351a9fe7d8ccf8ccb2bb1caa Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Thu, 24 Sep 2020 11:32:38 -0700 Subject: [PATCH 19/88] also update our vendored blst repo --- extern/fil-blst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extern/fil-blst b/extern/fil-blst index 5f93488fc..8609119cf 160000 --- a/extern/fil-blst +++ b/extern/fil-blst @@ -1 +1 @@ -Subproject commit 5f93488fc0dbfb450f2355269f18fc67010d59bb +Subproject commit 8609119cf4595d1741139c24378fcd8bc4f1c475 From 585b8cc51dee241dd0d340bd3df0502b10e324a6 Mon Sep 17 00:00:00 2001 From: Arsenii Petrovich Date: Fri, 25 Sep 2020 03:54:29 +0300 Subject: [PATCH 20/88] Add glif node to bootstrap peers --- build/bootstrap/bootstrappers.pi | 1 + 1 file changed, 1 insertion(+) diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index 465f3b5e9..481ede6ab 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -4,3 +4,4 @@ /dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34 /dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T /dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W +/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt From 89bfe84f36bc8264a50cc6f9982774e06feac7e8 Mon Sep 17 00:00:00 2001 From: MaoBisheng-IPFSUNION Date: Fri, 25 Sep 2020 10:18:12 +0800 Subject: [PATCH 21/88] add new booststrappers we hope to add two booststrappers for the filecoin network to improve the network synchronization --- build/bootstrap/bootstrappers.pi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index 9c619b1bb..e64bd9dae 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -4,3 +4,6 @@ /dns4/lotus-bootstrap-1.fra.fil-test.net/tcp/1347/p2p/12D3KooWLLpNYoKdf9NgcWudBhXLdTcXncqAsTzozw1scMMu6nS5 /dns4/lotus-bootstrap-0.sin.fil-test.net/tcp/1347/p2p/12D3KooWCNL9vXaXwNs3Bu8uRAJK4pxpCyPeM7jZLSDpJma1wrV8 
/dns4/lotus-bootstrap-1.sin.fil-test.net/tcp/1347/p2p/12D3KooWNGGxFda1eC5U2YKAgs4ypoFHn3Z3xHCsjmFdrCcytoxm + +/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz +/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u From 4ff38aa8564a3c86391bc6886e5e37d0c7ca286e Mon Sep 17 00:00:00 2001 From: hannahhoward Date: Fri, 25 Sep 2020 02:35:34 -0700 Subject: [PATCH 22/88] feat(markets): update to 0.6.3 --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index cfdf4cb1d..ca7e0760d 100644 --- a/go.mod +++ b/go.mod @@ -25,9 +25,9 @@ require ( github.com/filecoin-project/go-bitfield v0.2.0 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v0.6.5 + github.com/filecoin-project/go-data-transfer v0.6.6 github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f - github.com/filecoin-project/go-fil-markets v0.6.2 + github.com/filecoin-project/go-fil-markets v0.6.3 github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52 github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 @@ -60,7 +60,7 @@ require ( github.com/ipfs/go-ds-measure v0.1.0 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.2.0 + github.com/ipfs/go-graphsync v0.2.1 github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 diff --git a/go.sum b/go.sum index 3b31e6c3b..1ca615b59 100644 --- a/go.sum +++ b/go.sum @@ -222,12 +222,12 @@ github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:a github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.6.5 h1:oP20la8Z0CLrw0uqvt6xVgw6rOevZeGJ9GNQeC0OCSU= -github.com/filecoin-project/go-data-transfer v0.6.5/go.mod h1:I9Ylb/UiZyqnI41wUoCXq/le0nDLhlwpFQCtNPxEPOA= +github.com/filecoin-project/go-data-transfer v0.6.6 h1:2TccLSxPYJENcYRdov2WvpTvQ1qUMrPkWe8sBrfj36g= +github.com/filecoin-project/go-data-transfer v0.6.6/go.mod h1:C++k1U6+jMQODOaen5OPDo9XQbth9Yq3ie94vNjBJbk= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= -github.com/filecoin-project/go-fil-markets v0.6.2 h1:9Z57KeaQSa1liCmT1pH6SIjrn9mGTDFJXmR2WQVuaiY= -github.com/filecoin-project/go-fil-markets v0.6.2/go.mod h1:wtN4Hc/1hoVCpWhSWYxwUxH3PQtjSkWWuC1nQjiIWog= +github.com/filecoin-project/go-fil-markets v0.6.3 h1:3kTxfquGvk3zQY+hJH1kEA28tRQ47phqSRqOI4+YcQM= +github.com/filecoin-project/go-fil-markets v0.6.3/go.mod h1:Ug1yhGhzTYC6qrpKsR2QpU8QRCeBpwkTA9RICVKuOMM= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5 
h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= @@ -502,8 +502,8 @@ github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPi github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.2.0 h1:x94MvHLNuRwBlZzVal7tR1RYK7T7H6bqQLPopxDbIF0= -github.com/ipfs/go-graphsync v0.2.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.2.1 h1:MdehhqBSuTI2LARfKLkpYnt0mUrqHs/mtuDnESXHBfU= +github.com/ipfs/go-graphsync v0.2.1/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= From 9dfce400a7a5234ace883ae61648f94eb915516f Mon Sep 17 00:00:00 2001 From: MaoBisheng-IPFSUNION Date: Fri, 25 Sep 2020 17:36:57 +0800 Subject: [PATCH 23/88] delate --- build/bootstrap/bootstrappers.pi | 1 - 1 file changed, 1 deletion(-) diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index e64bd9dae..802f1471a 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -4,6 +4,5 @@ /dns4/lotus-bootstrap-1.fra.fil-test.net/tcp/1347/p2p/12D3KooWLLpNYoKdf9NgcWudBhXLdTcXncqAsTzozw1scMMu6nS5 /dns4/lotus-bootstrap-0.sin.fil-test.net/tcp/1347/p2p/12D3KooWCNL9vXaXwNs3Bu8uRAJK4pxpCyPeM7jZLSDpJma1wrV8 /dns4/lotus-bootstrap-1.sin.fil-test.net/tcp/1347/p2p/12D3KooWNGGxFda1eC5U2YKAgs4ypoFHn3Z3xHCsjmFdrCcytoxm - /dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u From 80a7ed811638853987a3124175606d7db5269bf2 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Fri, 25 Sep 2020 15:54:27 +0200 Subject: [PATCH 24/88] refactor: use abstract types instead of specs-actors --- chain/actors/builtin/miner/miner.go | 4 ++++ storage/wdpost_changehandler.go | 8 ++++++-- storage/wdpost_changehandler_test.go | 8 ++++---- storage/wdpost_nextdl_test.go | 4 ++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 50a0fc5ca..e09cac587 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -21,6 +21,10 @@ import ( // Unchanged between v0 and v1 actors var PreCommitChallengeDelay = miner0.PreCommitChallengeDelay var WPoStProvingPeriod = miner0.WPoStProvingPeriod +var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines +var WPoStChallengeWindow = miner0.WPoStChallengeWindow +var WPoStChallengeLookback = miner0.WPoStChallengeLookback +var FaultDeclarationCutoff = miner0.FaultDeclarationCutoff const MinSectorExpiration = miner0.MinSectorExpiration diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index e65b7a7fc..285995757 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/lotus/chain/types" @@ -529,5 +529,9 @@ func nextDeadline(currentDeadline *dline.Info) *dline.Info { periodStart = periodStart + miner.WPoStProvingPeriod } - return miner.NewDeadlineInfo(periodStart, newDeadline, currentDeadline.CurrentEpoch) + return NewDeadlineInfo(periodStart, newDeadline, currentDeadline.CurrentEpoch) +} + +func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info { + return dline.NewInfo(periodStart, deadlineIdx, currEpoch, miner.WPoStPeriodDeadlines, miner.WPoStProvingPeriod, miner.WPoStChallengeWindow, miner.WPoStChallengeLookback, miner.FaultDeclarationCutoff) } diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index d2a4779e6..6479c0d7e 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -17,8 +17,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" ) var dummyCid cid.Cid @@ -90,7 +90,7 @@ func (m *mockAPI) getDeadline(currentEpoch abi.ChainEpoch) *dline.Info { close += miner.WPoStChallengeWindow dlIdx++ } - return miner.NewDeadlineInfo(0, dlIdx, currentEpoch) + return NewDeadlineInfo(0, dlIdx, currentEpoch) } func (m *mockAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) { @@ -355,7 +355,7 @@ func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) { periodStart := miner.WPoStProvingPeriod dlIdx := uint64(1) currentEpoch := abi.ChainEpoch(10) - di := miner.NewDeadlineInfo(periodStart, dlIdx, currentEpoch) + di := NewDeadlineInfo(periodStart, dlIdx, currentEpoch) mock.setDeadline(di) defer s.ch.shutdown() @@ -375,7 +375,7 @@ func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) { // Advance the head to the next proving period's first epoch currentEpoch = periodStart + miner.WPoStChallengeWindow - di = miner.NewDeadlineInfo(periodStart, dlIdx, currentEpoch) + di = NewDeadlineInfo(periodStart, dlIdx, currentEpoch) mock.setDeadline(di) go triggerHeadAdvance(t, s, currentEpoch) diff --git a/storage/wdpost_nextdl_test.go b/storage/wdpost_nextdl_test.go index ad4b1fdeb..4a23bad65 100644 --- a/storage/wdpost_nextdl_test.go +++ b/storage/wdpost_nextdl_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) func TestNextDeadline(t *testing.T) { @@ -14,7 +14,7 @@ func TestNextDeadline(t *testing.T) { deadlineIdx := 0 currentEpoch := abi.ChainEpoch(10) - di := miner.NewDeadlineInfo(periodStart, uint64(deadlineIdx), currentEpoch) + di := NewDeadlineInfo(periodStart, uint64(deadlineIdx), currentEpoch) require.EqualValues(t, 0, di.Index) require.EqualValues(t, 0, di.PeriodStart) require.EqualValues(t, -20, di.Challenge) From bdc782617f1547b1e9631ba34ba8fba917ce767e Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 25 Sep 2020 11:27:30 -0700 Subject: [PATCH 25/88] return an error when we 
fail to find a sector when checking sector expiration Returning nil, nil is a footgun. fix: https://github.com/filecoin-project/lotus/issues/3984 --- chain/actors/builtin/miner/v0.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 9cdfc25bc..f5aa7849d 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -6,6 +6,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" @@ -96,9 +97,7 @@ func (s *state0) NumLiveSectors() (uint64, error) { // GetSectorExpiration returns the effective expiration of the given sector. // -// If the sector isn't found or has already been terminated, this method returns -// nil and no error. If the sector does not expire early, the Early expiration -// field is 0. +// If the sector does not expire early, the Early expiration field is 0. func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { dls, err := s.State.LoadDeadlines(s.store) if err != nil { @@ -161,7 +160,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e return nil, err } if out.Early == 0 && out.OnTime == 0 { - return nil, nil + return nil, xerrors.Errorf("failed to find sector %d", num) } return &out, nil } From ddcbcdea4809eb2e0c153207f9c8d1116d9cd1e8 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 25 Sep 2020 12:16:35 -0700 Subject: [PATCH 26/88] test sector status on expiring sectors --- api/test/ccupgrade.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/api/test/ccupgrade.go b/api/test/ccupgrade.go index b281f30a0..f58f1ff6e 100644 --- a/api/test/ccupgrade.go +++ b/api/test/ccupgrade.go @@ -94,6 +94,22 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) { require.Less(t, 50000, int(exp.OnTime)) } + dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + // Sector should expire. + for { + // Wait for the sector to expire. + status, err := miner.SectorsStatus(ctx, CC, true) + require.NoError(t, err) + if status.OnTime == 0 && status.Early == 0 { + break + } + t.Log("waiting for sector to expire") + // wait one deadline per loop. 
+ time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime) + } + fmt.Println("shutting down mining") atomic.AddInt64(&mine, -1) <-done From 60e43ccbb13a88473f91b80cf645758523658ad2 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Fri, 25 Sep 2020 15:45:27 -0400 Subject: [PATCH 27/88] Add an envvar to set address network version --- build/params_shared_funcs.go | 6 ++++++ build/params_shared_vals.go | 12 +++++++++++ chain/types_test.go | 40 ++++++++++++++++++++++++++++++++++++ go.mod | 2 +- go.sum | 2 ++ 5 files changed, 61 insertions(+), 1 deletion(-) diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index 0e9739914..28567d3d1 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -3,6 +3,8 @@ package build import ( "sort" + "github.com/filecoin-project/go-address" + "github.com/libp2p/go-libp2p-core/protocol" "github.com/filecoin-project/go-state-types/abi" @@ -44,3 +46,7 @@ func UseNewestNetwork() bool { } return false } + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 3ee9f52ec..5dd23de44 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -4,6 +4,9 @@ package build import ( "math/big" + "os" + + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/network" @@ -60,6 +63,11 @@ const TicketRandomnessLookback = abi.ChainEpoch(1) const WinningPoStSectorSetLookback = abi.ChainEpoch(10) +// ///// +// Address + +const AddressMainnetEnvVar = "_mainnet_" + // ///// // Devnet settings @@ -75,6 +83,10 @@ var InitialRewardBalance *big.Int func init() { InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) + + if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { + SetAddressNetwork(address.Mainnet) + } } // Sync diff --git a/chain/types_test.go b/chain/types_test.go index 7d68da68d..b47471c9d 100644 --- a/chain/types_test.go +++ b/chain/types_test.go @@ -1,9 +1,12 @@ package chain import ( + "crypto/rand" "encoding/json" "testing" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" ) @@ -35,3 +38,40 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) { t.Fatal(err) } } + +func TestAddressType(t *testing.T) { + build.SetAddressNetwork(address.Testnet) + addr, err := makeRandomAddress() + if err != nil { + t.Fatal(err) + } + + if string(addr[0]) != address.TestnetPrefix { + t.Fatalf("address should start with %s", address.TestnetPrefix) + } + + build.SetAddressNetwork(address.Mainnet) + addr, err = makeRandomAddress() + if err != nil { + t.Fatal(err) + } + + if string(addr[0]) != address.MainnetPrefix { + t.Fatalf("address should start with %s", address.MainnetPrefix) + } +} + +func makeRandomAddress() (string, error) { + bytes := make([]byte, 32) + _, err := rand.Read(bytes) + if err != nil { + return "", err + } + + addr, err := address.NewActorAddress(bytes) + if err != nil { + return "", err + } + + return addr.String(), nil +} diff --git a/go.mod b/go.mod index ca7e0760d..cfa6bf47f 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/elastic/go-sysinfo v1.3.0 github.com/fatih/color v1.8.0 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d - github.com/filecoin-project/go-address v0.0.3 + 
github.com/filecoin-project/go-address v0.0.4 github.com/filecoin-project/go-bitfield v0.2.0 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 diff --git a/go.sum b/go.sum index 1ca615b59..7e50fc200 100644 --- a/go.sum +++ b/go.sum @@ -214,6 +214,8 @@ github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGj github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w= +github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= From a13d10e42d70018652e9c4926bad9dd155514950 Mon Sep 17 00:00:00 2001 From: Travis Person Date: Fri, 25 Sep 2020 20:08:28 +0000 Subject: [PATCH 28/88] add logging to chain export --- chain/store/store.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/chain/store/store.go b/chain/store/store.go index fce8a650f..1dbf69547 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -15,9 +15,11 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/journal" bstore "github.com/filecoin-project/lotus/lib/blockstore" @@ -1183,6 +1185,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo } blocksToWalk := ts.Cids() + currentMinHeight := ts.Height() walkChain := func(blk cid.Cid) error { if !seen.Visit(blk) { @@ -1203,6 +1206,13 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err) } + if currentMinHeight > b.Height { + currentMinHeight = b.Height + if currentMinHeight%builtin.EpochsInDay == 0 { + log.Infow("export", "height", currentMinHeight) + } + } + var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { mcids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages}) @@ -1251,6 +1261,9 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo return nil } + log.Infow("export started") + exportStart := build.Clock.Now() + for len(blocksToWalk) > 0 { next := blocksToWalk[0] blocksToWalk = blocksToWalk[1:] @@ -1259,6 +1272,8 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo } } + log.Infow("export finished", "duration", build.Clock.Now().Sub(exportStart).Seconds()) + return nil } From 4eec4a01410c19a5687fec620fca418551ce9803 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 25 Sep 2020 23:28:12 +0200 Subject: [PATCH 29/88] Move policy change to seal bench Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/main.go | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 2516bfd26..e409dfe5a 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -76,8 +76,6 @@ func main() { log.Info("Starting lotus-bench") - policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) - app := &cli.App{ Name: "lotus-bench", Usage: "Benchmark performance of lotus on your hardware", @@ -147,6 +145,8 @@ var sealBenchCmd = &cli.Command{ }, }, Action: func(c *cli.Context) error { + policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + if c.Bool("no-gpu") { err := os.Setenv("BELLMAN_NO_GPU", "1") if err != nil { From d8431ff611be8ae5d47a4b05504e10ffd48c1673 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Fri, 25 Sep 2020 17:33:25 -0400 Subject: [PATCH 30/88] Fix AddSupportedProofTypes --- chain/actors/policy/policy.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index eec52e855..b8205177e 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -22,12 +22,10 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { // AddSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { - newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types)) for _, t := range types { - newTypes[t] = struct{}{} + // Set for all miner versions. + miner0.SupportedProofTypes[t] = struct{}{} } - // Set for all miner versions. - miner0.SupportedProofTypes = newTypes } // SetPreCommitChallengeDelay sets the pre-commit challenge delay across all From a5f13a5b31cb555eca3d1e9a63fb69fb93ef5023 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Fri, 25 Sep 2020 14:36:36 -0700 Subject: [PATCH 31/88] Test supported proof types. --- chain/actors/policy/policy_test.go | 36 ++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 chain/actors/policy/policy_test.go diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go new file mode 100644 index 000000000..be64362a2 --- /dev/null +++ b/chain/actors/policy/policy_test.go @@ -0,0 +1,36 @@ +package policy + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" +) + +func TestSupportedProofTypes(t *testing.T) { + var oldTypes []abi.RegisteredSealProof + for t := range miner0.SupportedProofTypes { + oldTypes = append(oldTypes, t) + } + t.Cleanup(func() { + SetSupportedProofTypes(oldTypes...) 
+ }) + + SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + require.EqualValues(t, + miner0.SupportedProofTypes, + map[abi.RegisteredSealProof]struct{}{ + abi.RegisteredSealProof_StackedDrg2KiBV1: {}, + }, + ) + AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + require.EqualValues(t, + miner0.SupportedProofTypes, + map[abi.RegisteredSealProof]struct{}{ + abi.RegisteredSealProof_StackedDrg2KiBV1: {}, + abi.RegisteredSealProof_StackedDrg8MiBV1: {}, + }, + ) +} From 8545c08f30bdb25add71604f5f6b5f12ed862966 Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Fri, 25 Sep 2020 23:47:59 -0700 Subject: [PATCH 32/88] add json output to state compute --- cli/state.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cli/state.go b/cli/state.go index d96c93c54..3d37e6fb7 100644 --- a/cli/state.go +++ b/cli/state.go @@ -818,6 +818,10 @@ var stateComputeStateCmd = &cli.Command{ Name: "html", Usage: "generate html report", }, + &cli.BoolFlag{ + Name: "json", + Usage: "generate json output", + }, }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) @@ -862,6 +866,15 @@ var stateComputeStateCmd = &cli.Command{ return err } + if cctx.Bool("json") { + out, err := json.Marshal(stout) + if err != nil { + return err + } + fmt.Println(string(out)) + return nil + } + if cctx.Bool("html") { codeCache := map[address.Address]cid.Cid{} getCode := func(addr address.Address) (cid.Cid, error) { From ef28ebb14a9e92833d6eb9cfab5f6f59279e516a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Thu, 24 Sep 2020 23:30:11 +0200 Subject: [PATCH 33/88] Ignition upgrades, much excite. --- build/params_2k.go | 2 + build/params_shared_vals.go | 6 +- build/params_testnet.go | 8 + chain/actors/builtin/builtin.go | 2 +- chain/stmgr/forks.go | 377 +++++++++++++++++++++++++++++--- chain/stmgr/forks_test.go | 18 +- chain/stmgr/stmgr.go | 194 +++++++++++++--- chain/stmgr/utils.go | 20 +- cli/state.go | 24 +- cmd/lotus-shed/balances.go | 138 ++++++++++++ go.mod | 2 +- go.sum | 4 +- node/modules/chain.go | 5 + node/modules/storageminer.go | 3 + storage/wdpost_run.go | 4 + 15 files changed, 708 insertions(+), 99 deletions(-) diff --git a/build/params_2k.go b/build/params_2k.go index 3edd0fb82..3682f7be1 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -12,6 +12,8 @@ const UpgradeBreezeHeight = -1 const BreezeGasTampingDuration = 0 const UpgradeSmokeHeight = -1 +const UpgradeIgnitionHeight = -2 +const UpgradeLiftoffHeight = -3 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 3ee9f52ec..95c347281 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -22,8 +22,8 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) -const NewestNetworkVersion = network.Version2 -const ActorUpgradeNetworkVersion = network.Version3 +const NewestNetworkVersion = network.Version3 +const ActorUpgradeNetworkVersion = network.Version4 // Epochs const ForkLengthThreshold = Finality @@ -63,6 +63,8 @@ const WinningPoStSectorSetLookback = abi.ChainEpoch(10) // ///// // Devnet settings +var Devnet = true + const FilBase = uint64(2_000_000_000) const FilAllocStorageMining = uint64(1_100_000_000) diff --git a/build/params_testnet.go b/build/params_testnet.go index 13d2ff62e..960f3a9b6 100644 --- a/build/params_testnet.go +++ b/build/params_testnet.go @@ -21,12 +21,20 @@ const BreezeGasTampingDuration = 
120 const UpgradeSmokeHeight = 51000 +const UpgradeIgnitionHeight = 94000 + +// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. +// Miners, clients, developers, custodians all need time to prepare. +// We still have upgrades and state changes to do, but can happen after signaling timing here. +const UpgradeLiftoffHeight = 148888 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) policy.SetSupportedProofTypes( abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1, ) + Devnet = false } const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds) diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index 4079e694a..a85b4da65 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -21,7 +21,7 @@ const ( // Converts a network version into a specs-actors version. func VersionForNetwork(version network.Version) Version { switch version { - case network.Version0, network.Version1, network.Version2: + case network.Version0, network.Version1, network.Version2, network.Version3: return Version0 default: panic(fmt.Sprintf("unsupported network version %d", version)) diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index f9418c4d8..872c70b1e 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -1,44 +1,63 @@ package stmgr import ( + "bytes" "context" + "encoding/binary" + "math" + + multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/state" + + "github.com/filecoin-project/specs-actors/actors/migration/nv3" + + "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" cbor "github.com/ipfs/go-ipld-cbor" "golang.org/x/xerrors" ) -var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree, *types.TipSet) error{ - build.UpgradeBreezeHeight: UpgradeFaucetBurnRecovery, +var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, ExecCallback, cid.Cid, *types.TipSet) (cid.Cid, error){ + build.UpgradeBreezeHeight: UpgradeFaucetBurnRecovery, + build.UpgradeIgnitionHeight: UpgradeIgnition, + build.UpgradeLiftoffHeight: UpgradeLiftoff, } -func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch, ts *types.TipSet) (err error) { +func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) { + retCid := root + var err error f, ok := ForksAtHeight[height] if ok { - err := f(ctx, sm, st, ts) + retCid, err = f(ctx, sm, cb, root, ts) if err != nil { - return err + return cid.Undef, err } } - return nil + return retCid, nil } type forEachTree interface { ForEach(func(address.Address, *types.Actor) error) error } -func doTransfer(tree types.StateTree, 
from, to address.Address, amt abi.TokenAmount) error { +func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error { fromAct, err := tree.GetActor(from) if err != nil { return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err) @@ -64,10 +83,43 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo return xerrors.Errorf("failed to persist to actor: %w", err) } + if cb != nil { + // record the transfer in execution traces + + fakeMsg := &types.Message{ + From: from, + To: to, + Value: amt, + Nonce: math.MaxUint64, + } + fakeRct := &types.MessageReceipt{ + ExitCode: 0, + Return: nil, + GasUsed: 0, + } + + if err := cb(fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{ + MessageReceipt: *fakeRct, + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{ + Msg: fakeMsg, + MsgRct: fakeRct, + Error: "", + Duration: 0, + GasCharges: nil, + Subcalls: nil, + }, + Duration: 0, + GasCosts: vm.ZeroGasOutputs(), + }); err != nil { + return xerrors.Errorf("recording transfer: %w", err) + } + } + return nil } -func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types.StateTree, ts *types.TipSet) error { +func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { // Some initial parameters FundsForMiners := types.FromFil(1_000_000) LookbackEpoch := abi.ChainEpoch(32000) @@ -94,22 +146,22 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types // Grab lookback state for account checks lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false) if err != nil { - return xerrors.Errorf("failed to get tipset at lookback height: %w", err) + return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err) } lbtree, err := sm.ParentState(lbts) if err != nil { - return xerrors.Errorf("loading state tree failed: %w", err) + return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err) } ReserveAddress, err := address.NewFromString("t090") if err != nil { - return xerrors.Errorf("failed to parse reserve address: %w", err) + return cid.Undef, xerrors.Errorf("failed to parse reserve address: %w", err) } - fetree, ok := tree.(forEachTree) - if !ok { - return xerrors.Errorf("fork transition state tree doesnt support ForEach (%T)", tree) + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) } type transfer struct { @@ -121,7 +173,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types var transfers []transfer // Take all excess funds away, put them into the reserve account - err = fetree.ForEach(func(addr address.Address, act *types.Actor) error { + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { switch act.Code { case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID: sysAcc, err := isSystemAccount(addr) @@ -163,13 +215,13 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types return nil }) if err != nil { - return xerrors.Errorf("foreach over state tree failed: %w", err) + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) } // Execute transfers from previous step for _, t := range transfers { - if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil { - return xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + if err := 
doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) } } @@ -177,19 +229,19 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types var ps power0.State powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr) if err != nil { - return xerrors.Errorf("failed to load power actor: %w", err) + return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err) } cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) if err := cst.Get(ctx, powAct.Head, &ps); err != nil { - return xerrors.Errorf("failed to get power actor state: %w", err) + return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err) } totalPower := ps.TotalBytesCommitted var transfersBack []transfer // Now, we return some funds to places where they are needed - err = fetree.ForEach(func(addr address.Address, act *types.Actor) error { + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { lbact, err := lbtree.GetActor(addr) if err != nil { if !xerrors.Is(err, types.ErrActorNotFound) { @@ -267,53 +319,310 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types return nil }) if err != nil { - return xerrors.Errorf("foreach over state tree failed: %w", err) + return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err) } for _, t := range transfersBack { - if err := doTransfer(tree, t.From, t.To, t.Amt); err != nil { - return xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) + if err := doTransfer(cb, tree, t.From, t.To, t.Amt); err != nil { + return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err) } } // transfer all burnt funds back to the reserve account burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr) if err != nil { - return xerrors.Errorf("failed to load burnt funds actor: %w", err) + return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err) } - if err := doTransfer(tree, builtin0.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil { - return xerrors.Errorf("failed to unburn funds: %w", err) + if err := doTransfer(cb, tree, builtin0.BurntFundsActorAddr, ReserveAddress, burntAct.Balance); err != nil { + return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err) } // Top up the reimbursement service reimbAddr, err := address.NewFromString("t0111") if err != nil { - return xerrors.Errorf("failed to parse reimbursement service address") + return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address") } reimb, err := tree.GetActor(reimbAddr) if err != nil { - return xerrors.Errorf("failed to load reimbursement account actor: %w", err) + return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err) } difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance) - if err := doTransfer(tree, ReserveAddress, reimbAddr, difference); err != nil { - return xerrors.Errorf("failed to top up reimbursement account: %w", err) + if err := doTransfer(cb, tree, ReserveAddress, reimbAddr, difference); err != nil { + return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err) } // Now, a final sanity check to make sure the balances all check out total := abi.NewTokenAmount(0) - err = fetree.ForEach(func(addr address.Address, act *types.Actor) error { + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { total = types.BigAdd(total, 
act.Balance) return nil }) if err != nil { - return xerrors.Errorf("checking final state balance failed: %w", err) + return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err) } exp := types.FromFil(build.FilBase) if !exp.Equals(total) { - return xerrors.Errorf("resultant state tree account balance was not correct: %s", total) + return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total) + } + + return tree.Flush(ctx) +} + +func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + store := sm.cs.Store(ctx) + + nst, err := nv3.MigrateStateTree(ctx, store, root, build.UpgradeIgnitionHeight) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors state: %w", err) + } + + tree, err := sm.StateTree(nst) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, store, tree, "ignition") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + split1, err := address.NewFromString("t0115") + if err != nil { + return cid.Undef, xerrors.Errorf("first split address: %w", err) + } + + split2, err := address.NewFromString("t0116") + if err != nil { + return cid.Undef, xerrors.Errorf("second split address: %w", err) + } + + err = resetGenesisMsigs(ctx, sm, store, tree) + if err != nil { + return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err) + } + + err = splitGenesisMultisig(ctx, cb, split1, store, tree, 50) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting first msig: %w", err) + } + + err = splitGenesisMultisig(ctx, cb, split2, store, tree, 50) + if err != nil { + return cid.Undef, xerrors.Errorf("splitting second msig: %w", err) + } + + err = nv3.CheckStateTree(ctx, store, nst, build.UpgradeIgnitionHeight, builtin0.TotalFilecoin) + if err != nil { + return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err) + } + + return tree.Flush(ctx) +} + +func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { + tree, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + + err = setNetworkName(ctx, sm.cs.Store(ctx), tree, "mainnet") + if err != nil { + return cid.Undef, xerrors.Errorf("setting network name: %w", err) + } + + return tree.Flush(ctx) +} + +func setNetworkName(ctx context.Context, store adt0.Store, tree *state.StateTree, name string) error { + ia, err := tree.GetActor(builtin0.InitActorAddr) + if err != nil { + return xerrors.Errorf("getting init actor: %w", err) + } + + var initState init0.State + if err := store.Get(ctx, ia.Head, &initState); err != nil { + return xerrors.Errorf("reading init state: %w", err) + } + + initState.NetworkName = name + + ia.Head, err = store.Put(ctx, &initState) + if err != nil { + return xerrors.Errorf("writing new init state: %w", err) + } + + if err := tree.SetActor(builtin0.InitActorAddr, ia); err != nil { + return xerrors.Errorf("setting init actor: %w", err) + } + + return nil +} + +func splitGenesisMultisig(ctx context.Context, cb ExecCallback, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64) error { + if portions < 1 { + return xerrors.Errorf("cannot split into 0 portions") + } + + mact, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("getting msig actor: %w", err) + } 
+ + mst, err := multisig.Load(store, mact) + if err != nil { + return xerrors.Errorf("getting msig state: %w", err) + } + + signers, err := mst.Signers() + if err != nil { + return xerrors.Errorf("getting msig signers: %w", err) + } + + thresh, err := mst.Threshold() + if err != nil { + return xerrors.Errorf("getting msig threshold: %w", err) + } + + ibal, err := mst.InitialBalance() + if err != nil { + return xerrors.Errorf("getting msig initial balance: %w", err) + } + + se, err := mst.StartEpoch() + if err != nil { + return xerrors.Errorf("getting msig start epoch: %w", err) + } + + ud, err := mst.UnlockDuration() + if err != nil { + return xerrors.Errorf("getting msig unlock duration: %w", err) + } + + pending, err := adt0.MakeEmptyMap(store).Root() + if err != nil { + return xerrors.Errorf("failed to create empty map: %w", err) + } + + newIbal := big.Div(ibal, types.NewInt(portions)) + newState := &multisig0.State{ + Signers: signers, + NumApprovalsThreshold: thresh, + NextTxnID: 0, + InitialBalance: newIbal, + StartEpoch: se, + UnlockDuration: ud, + PendingTxns: pending, + } + + scid, err := store.Put(ctx, newState) + if err != nil { + return xerrors.Errorf("storing new state: %w", err) + } + + newActor := types.Actor{ + Code: builtin0.MultisigActorCodeID, + Head: scid, + Nonce: 0, + Balance: big.Zero(), + } + + i := uint64(0) + for i < portions { + keyAddr, err := makeKeyAddr(addr, i) + if err != nil { + return xerrors.Errorf("creating key address: %w", err) + } + + idAddr, err := tree.RegisterNewAddress(keyAddr) + if err != nil { + return xerrors.Errorf("registering new address: %w", err) + } + + err = tree.SetActor(idAddr, &newActor) + if err != nil { + return xerrors.Errorf("setting new msig actor state: %w", err) + } + + if err := doTransfer(cb, tree, addr, idAddr, newIbal); err != nil { + return xerrors.Errorf("transferring split msig balance: %w", err) + } + + i++ + } + + return nil +} + +func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) { + var b bytes.Buffer + if err := splitAddr.MarshalCBOR(&b); err != nil { + return address.Undef, xerrors.Errorf("marshalling split address: %w", err) + } + + if err := binary.Write(&b, binary.BigEndian, count); err != nil { + return address.Undef, xerrors.Errorf("writing count into a buffer: %w", err) + } + + if err := binary.Write(&b, binary.BigEndian, []byte("Ignition upgrade")); err != nil { + return address.Undef, xerrors.Errorf("writing fork name into a buffer: %w", err) + } + + addr, err := address.NewActorAddress(b.Bytes()) + if err != nil { + return address.Undef, xerrors.Errorf("create actor address: %w", err) + } + + return addr, nil +} + +func resetGenesisMsigs(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree) error { + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + genesisTree, err := state.LoadStateTree(cst, gts.ParentState()) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error { + if genesisActor.Code == builtin0.MultisigActorCodeID { + currActor, err := tree.GetActor(addr) + if err != nil { + return xerrors.Errorf("loading actor: %w", err) + } + + var currState multisig0.State + if err := store.Get(ctx, 
currActor.Head, &currState); err != nil { + return xerrors.Errorf("reading multisig state: %w", err) + } + + currState.StartEpoch = build.UpgradeLiftoffHeight + + currActor.Head, err = store.Put(ctx, &currState) + if err != nil { + return xerrors.Errorf("writing new multisig state: %w", err) + } + + if err := tree.SetActor(addr, currActor); err != nil { + return xerrors.Errorf("setting multisig actor: %w", err) + } + } + return nil + }) + + if err != nil { + return xerrors.Errorf("iterating over genesis actors: %w", err) } return nil diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 516058a81..a3423ccdd 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -25,6 +25,7 @@ import ( _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log" cbg "github.com/whyrusleeping/cbor-gen" @@ -114,33 +115,38 @@ func TestForkHeightTriggers(t *testing.T) { t.Fatal(err) } - stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree, ts *types.TipSet) error { + stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, ts *types.TipSet) (cid.Cid, error) { cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) + st, err := sm.StateTree(root) + if err != nil { + return cid.Undef, xerrors.Errorf("getting state tree: %w", err) + } + act, err := st.GetActor(taddr) if err != nil { - return err + return cid.Undef, err } var tas testActorState if err := cst.Get(ctx, act.Head, &tas); err != nil { - return xerrors.Errorf("in fork handler, failed to run get: %w", err) + return cid.Undef, xerrors.Errorf("in fork handler, failed to run get: %w", err) } tas.HasUpgraded = 55 ns, err := cst.Put(ctx, &tas) if err != nil { - return err + return cid.Undef, err } act.Head = ns if err := st.SetActor(taddr, act); err != nil { - return err + return cid.Undef, err } - return nil + return st.Flush(ctx) } inv.Register(builtin.PaymentChannelActorCodeID, &testActor{}, &testActorState{}) diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index eaf9215db..e800ce665 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -41,12 +41,13 @@ var log = logging.Logger("statemgr") type StateManager struct { cs *store.ChainStore - stCache map[string][]cid.Cid - compWait map[string]chan struct{} - stlk sync.Mutex - genesisMsigLk sync.Mutex - newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) - genInfo *genesisInfo + stCache map[string][]cid.Cid + compWait map[string]chan struct{} + stlk sync.Mutex + genesisMsigLk sync.Mutex + newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) + preIgnitionGenInfos *genesisInfo + postIgnitionGenInfos *genesisInfo } func NewStateManager(cs *store.ChainStore) *StateManager { @@ -123,9 +124,8 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return st, rec, nil } -func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { - var trace []*api.InvocResult - st, _, err := sm.computeTipSetState(ctx, ts, func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { +func traceFunc(trace *[]*api.InvocResult) func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { + return func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { ir := &api.InvocResult{ Msg: msg, MsgRct: &ret.MessageReceipt, @@ -135,9 +135,14 @@ func 
(sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c if ret.ActorErr != nil { ir.Error = ret.ActorErr.Error() } - trace = append(trace, ir) + *trace = append(*trace, ir) return nil - }) + } +} + +func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { + var trace []*api.InvocResult + st, _, err := sm.computeTipSetState(ctx, ts, traceFunc(&trace)) if err != nil { return cid.Undef, nil, err } @@ -149,20 +154,24 @@ type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { - vmopt := &vm.VMOpts{ - StateBase: pstate, - Epoch: epoch, - Rand: r, - Bstore: sm.cs.Blockstore(), - Syscalls: sm.cs.VMSys(), - CircSupplyCalc: sm.GetCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, - BaseFee: baseFee, + makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { + vmopt := &vm.VMOpts{ + StateBase: base, + Epoch: epoch, + Rand: r, + Bstore: sm.cs.Blockstore(), + Syscalls: sm.cs.VMSys(), + CircSupplyCalc: sm.GetCirculatingSupply, + NtwkVersion: sm.GetNtwkVersion, + BaseFee: baseFee, + } + + return sm.newVM(ctx, vmopt) } - vmi, err := sm.newVM(ctx, vmopt) + vmi, err := makeVmWithBaseState(pstate) if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("instantiating VM failed: %w", err) + return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) } runCron := func() error { @@ -202,19 +211,32 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp for i := parentEpoch; i < epoch; i++ { // handle state forks // XXX: The state tree - err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts) + newState, err := sm.handleStateForks(ctx, pstate, i, cb, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) } + if pstate != newState { + vmi, err = makeVmWithBaseState(newState) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) + } + } + if i > parentEpoch { // run cron for null rounds if any if err := runCron(); err != nil { return cid.Cid{}, cid.Cid{}, err } + + newState, err = vmi.Flush(ctx) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err) + } } vmi.SetBlockHeight(i + 1) + pstate = newState } var receipts []cbg.CBORMarshaler @@ -904,7 +926,7 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error { gi.genesisMsigs = append(gi.genesisMsigs, ns) } - sm.genInfo = &gi + sm.preIgnitionGenInfos = &gi return nil } @@ -912,7 +934,7 @@ func (sm *StateManager) setupGenesisActors(ctx context.Context) error { // sets up information about the actors in the genesis state // For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs // We also do not consider ANY account actors (including the faucet) -func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error { +func (sm *StateManager) setupPreIgnitionGenesisActorsTestnet(ctx context.Context) error { gi := genesisInfo{} @@ -981,7 +1003,87 @@ func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error { gi.genesisMsigs = append(gi.genesisMsigs, ns) } - sm.genInfo = &gi + sm.preIgnitionGenInfos = &gi + + return nil +} + +// sets up information about the actors in the genesis state, 
post the ignition fork +func (sm *StateManager) setupPostIgnitionGenesisActors(ctx context.Context) error { + + gi := genesisInfo{} + + gb, err := sm.cs.GetGenesis() + if err != nil { + return xerrors.Errorf("getting genesis block: %w", err) + } + + gts, err := types.NewTipSet([]*types.BlockHeader{gb}) + if err != nil { + return xerrors.Errorf("getting genesis tipset: %w", err) + } + + st, _, err := sm.TipSetState(ctx, gts) + if err != nil { + return xerrors.Errorf("getting genesis tipset state: %w", err) + } + + cst := cbor.NewCborStore(sm.cs.Blockstore()) + sTree, err := state.LoadStateTree(cst, st) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + + // Unnecessary, should be removed + gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis market funds: %w", err) + } + + // Unnecessary, should be removed + gi.genesisPledge, err = getFilPowerLocked(ctx, sTree) + if err != nil { + return xerrors.Errorf("setting up genesis pledge: %w", err) + } + + totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount) + + // 6 months + sixMonths := abi.ChainEpoch(183 * builtin0.EpochsInDay) + totalsByEpoch[sixMonths] = big.NewInt(49_929_341) + totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700)) + + // 1 year + oneYear := abi.ChainEpoch(365 * builtin0.EpochsInDay) + totalsByEpoch[oneYear] = big.NewInt(22_421_712) + + // 2 years + twoYears := abi.ChainEpoch(2 * 365 * builtin0.EpochsInDay) + totalsByEpoch[twoYears] = big.NewInt(7_223_364) + + // 3 years + threeYears := abi.ChainEpoch(3 * 365 * builtin0.EpochsInDay) + totalsByEpoch[threeYears] = big.NewInt(87_637_883) + + // 6 years + sixYears := abi.ChainEpoch(6 * 365 * builtin0.EpochsInDay) + totalsByEpoch[sixYears] = big.NewInt(100_000_000) + totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000)) + + gi.genesisMsigs = make([]msig0.State, 0, len(totalsByEpoch)) + for k, v := range totalsByEpoch { + ns := msig0.State{ + // In the pre-ignition logic, we incorrectly set this value in Fil, not attoFil, an off-by-10^18 error + InitialBalance: big.Mul(v, big.NewInt(int64(build.FilecoinPrecision))), + UnlockDuration: k, + PendingTxns: cid.Undef, + // In the pre-ignition logic, the start epoch was 0. This changes in the fork logic of the Ignition upgrade itself. + StartEpoch: build.UpgradeLiftoffHeight, + } + gi.genesisMsigs = append(gi.genesisMsigs, ns) + } + + sm.postIgnitionGenInfos = &gi return nil } @@ -991,13 +1093,23 @@ func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error { // - For Accounts, it counts max(currentBalance - genesisBalance, 0). func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { vf := big.Zero() - for _, v := range sm.genInfo.genesisMsigs { - au := big.Sub(v.InitialBalance, v.AmountLocked(height)) - vf = big.Add(vf, au) + if height <= build.UpgradeIgnitionHeight { + for _, v := range sm.preIgnitionGenInfos.genesisMsigs { + au := big.Sub(v.InitialBalance, v.AmountLocked(height)) + vf = big.Add(vf, au) + } + } else { + for _, v := range sm.postIgnitionGenInfos.genesisMsigs { + // In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0. + // The start epoch changed in the Ignition upgrade. 
+ au := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch)) + vf = big.Add(vf, au) + } } // there should not be any such accounts in testnet (and also none in mainnet?) - for _, v := range sm.genInfo.genesisActors { + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + for _, v := range sm.preIgnitionGenInfos.genesisActors { act, err := st.GetActor(v.addr) if err != nil { return big.Zero(), xerrors.Errorf("failed to get actor: %w", err) @@ -1009,8 +1121,10 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, } } - vf = big.Add(vf, sm.genInfo.genesisPledge) - vf = big.Add(vf, sm.genInfo.genesisMarketFunds) + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge) + // continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch + vf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds) return vf, nil } @@ -1084,10 +1198,16 @@ func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, err func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { sm.genesisMsigLk.Lock() defer sm.genesisMsigLk.Unlock() - if sm.genInfo == nil { - err := sm.setupGenesisActorsTestnet(ctx) + if sm.preIgnitionGenInfos == nil { + err := sm.setupPreIgnitionGenesisActorsTestnet(ctx) if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup genesis information: %w", err) + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition genesis information: %w", err) + } + } + if sm.postIgnitionGenInfos == nil { + err := sm.setupPostIgnitionGenesisActors(ctx) + if err != nil { + return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition genesis information: %w", err) } } @@ -1152,6 +1272,10 @@ func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoc return network.Version1 } + if height <= build.UpgradeIgnitionHeight { + return network.Version2 + } + return build.NewestNetworkVersion } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 3493afca3..58e7f480f 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -368,6 +368,16 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, return cid.Undef, nil, err } + for i := ts.Height(); i < height; i++ { + // handle state forks + base, err = sm.handleStateForks(ctx, base, i, traceFunc(&trace), ts) + if err != nil { + return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err) + } + + // TODO: should we also run cron here? + } + r := store.NewChainRand(sm.cs, ts.Cids()) vmopt := &vm.VMOpts{ StateBase: base, @@ -384,16 +394,6 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, return cid.Undef, nil, err } - for i := ts.Height(); i < height; i++ { - // handle state forks - err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err) - } - - // TODO: should we also run cron here? 
- } - for i, msg := range msgs { // TODO: Use the signed message length for secp messages ret, err := vmi.ApplyMessage(ctx, msg) diff --git a/cli/state.go b/cli/state.go index d96c93c54..b25a5e4a7 100644 --- a/cli/state.go +++ b/cli/state.go @@ -19,6 +19,7 @@ import ( "github.com/multiformats/go-multiaddr" "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" @@ -33,7 +34,9 @@ import ( "github.com/filecoin-project/specs-actors/actors/builtin/exported" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" ) @@ -834,14 +837,14 @@ var stateComputeStateCmd = &cli.Command{ } h := abi.ChainEpoch(cctx.Uint64("vm-height")) - if h == 0 { - if ts == nil { - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - ts = head + if ts == nil { + head, err := api.ChainHead(ctx) + if err != nil { + return err } + ts = head + } + if h == 0 { h = ts.Height() } @@ -863,13 +866,18 @@ var stateComputeStateCmd = &cli.Command{ } if cctx.Bool("html") { + st, err := state.LoadStateTree(cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), stout.Root) + if err != nil { + return xerrors.Errorf("loading state tree: %w", err) + } + codeCache := map[address.Address]cid.Cid{} getCode := func(addr address.Address) (cid.Cid, error) { if c, found := codeCache[addr]; found { return c, nil } - c, err := api.StateGetActor(ctx, addr, ts.Key()) + c, err := st.GetActor(addr) if err != nil { return cid.Cid{}, err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index c156de931..1c89a00cf 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -3,9 +3,16 @@ package main import ( "context" "fmt" + "strconv" + + "github.com/docker/go-units" + lotusbuiltin "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -44,6 +51,7 @@ var auditsCmd = &cli.Command{ Subcommands: []*cli.Command{ chainBalanceCmd, chainBalanceStateCmd, + chainPledgeCmd, }, } @@ -248,3 +256,133 @@ var chainBalanceStateCmd = &cli.Command{ return nil }, } + +var chainPledgeCmd = &cli.Command{ + Name: "stateroot-pledge", + Description: "Calculate sector pledge numbers", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + ArgsUsage: "[stateroot epoch]", + Action: func(cctx *cli.Context) error { + logging.SetLogLevel("badger", "ERROR") + ctx := context.TODO() + + if !cctx.Args().Present() { + return fmt.Errorf("must pass state root") + } + + sroot, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + epoch, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64) + if err != nil { + return xerrors.Errorf("parsing epoch arg: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + ds, err := lkrepo.Datastore("/chain") + if err != nil { + 
return err + } + + mds, err := lkrepo.Datastore("/metadata") + if err != nil { + return err + } + + bs := blockstore.NewBlockstore(ds) + + cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier)) + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + sm := stmgr.NewStateManager(cs) + + state, err := state.LoadStateTree(cst, sroot) + if err != nil { + return err + } + + var ( + powerSmoothed lotusbuiltin.FilterEstimate + pledgeCollateral abi.TokenAmount + ) + if act, err := state.GetActor(power.Address); err != nil { + return xerrors.Errorf("loading miner actor: %w", err) + } else if s, err := power.Load(store, act); err != nil { + return xerrors.Errorf("loading power actor state: %w", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return xerrors.Errorf("failed to determine total power: %w", err) + } else if c, err := s.TotalLocked(); err != nil { + return xerrors.Errorf("failed to determine pledge collateral: %w", err) + } else { + powerSmoothed = p + pledgeCollateral = c + } + + circ, err := sm.GetCirculatingSupplyDetailed(ctx, abi.ChainEpoch(epoch), state) + if err != nil { + return err + } + + fmt.Println("(real) circulating supply: ", types.FIL(circ.FilCirculating)) + if circ.FilCirculating.LessThan(big.Zero()) { + circ.FilCirculating = big.Zero() + } + + rewardActor, err := state.GetActor(reward.Address) + if err != nil { + return xerrors.Errorf("loading miner actor: %w", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return xerrors.Errorf("loading reward actor state: %w", err) + } + + fmt.Println("FilVested", types.FIL(circ.FilVested)) + fmt.Println("FilMined", types.FIL(circ.FilMined)) + fmt.Println("FilBurnt", types.FIL(circ.FilBurnt)) + fmt.Println("FilLocked", types.FIL(circ.FilLocked)) + fmt.Println("FilCirculating", types.FIL(circ.FilCirculating)) + + for _, sectorWeight := range []abi.StoragePower{ + types.NewInt(32 << 30), + types.NewInt(64 << 30), + types.NewInt(32 << 30 * 10), + types.NewInt(64 << 30 * 10), + } { + initialPledge, err := rewardState.InitialPledgeForPower( + sectorWeight, + pledgeCollateral, + &powerSmoothed, + circ.FilCirculating, + ) + if err != nil { + return xerrors.Errorf("calculating initial pledge: %w", err) + } + + fmt.Println("IP ", units.HumanSize(float64(sectorWeight.Uint64())), types.FIL(initialPledge)) + } + + return nil + }, +} diff --git a/go.mod b/go.mod index ca7e0760d..b0de7dfd6 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/filecoin-project/go-statemachine v0.0.0-20200813232949-df9b130df370 github.com/filecoin-project/go-statestore v0.1.0 github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/specs-actors v0.9.10 + github.com/filecoin-project/specs-actors v0.9.11 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 github.com/filecoin-project/test-vectors/schema v0.0.1 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 diff --git a/go.sum b/go.sum index 1ca615b59..6412fe743 100644 --- a/go.sum +++ b/go.sum @@ -254,8 +254,8 @@ github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= github.com/filecoin-project/specs-actors v0.9.7/go.mod h1:wM2z+kwqYgXn5Z7scV1YHLyd1Q1cy0R8HfTIWQ0BFGU= 
-github.com/filecoin-project/specs-actors v0.9.10 h1:gU0TrRhgkCsBEOP42sGDE7RQuR0Cov9hJhBqq+RJmjU= -github.com/filecoin-project/specs-actors v0.9.10/go.mod h1:czlvLQGEX0fjLLfdNHD7xLymy6L3n7aQzRWzsYGf+ys= +github.com/filecoin-project/specs-actors v0.9.11 h1:TnpG7HAeiUrfj0mJM7UaPW0P2137H62RGof7ftT5Mas= +github.com/filecoin-project/specs-actors v0.9.11/go.mod h1:czlvLQGEX0fjLLfdNHD7xLymy6L3n7aQzRWzsYGf+ys= github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 h1:dJsTPWpG2pcTeojO2pyn0c6l+x/3MZYCBgo/9d11JEk= github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.1 h1:5fNF76nl4qolEvcIsjc0kUADlTMVHO73tW4kXXPnsus= diff --git a/node/modules/chain.go b/node/modules/chain.go index 5eda51078..66f54a76a 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -18,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/exchange" @@ -157,6 +158,10 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) } func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) { + if !build.Devnet { + return "testnetnet", nil + } + ctx := helpers.LifecycleCtx(mctx, lc) netName, err := stmgr.GetNetworkName(ctx, stmgr.NewStateManager(cs), cs.GetHeaviestTipSet().ParentState()) diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 9a94a56a5..de466b004 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -109,6 +109,9 @@ func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) { } func StorageNetworkName(ctx helpers.MetricsCtx, a lapi.FullNode) (dtypes.NetworkName, error) { + if !build.Devnet { + return "testnetnet", nil + } return a.StateNetworkName(ctx) } diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 9a497f879..59ffcb74c 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -371,6 +371,10 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty return j }) + if ts.Height() > build.UpgradeIgnitionHeight { + return // FORK: declaring faults after ignition upgrade makes no sense + } + if faults, sigmsg, err = s.checkNextFaults(context.TODO(), declDeadline, partitions); err != nil { // TODO: This is also potentially really bad, but we try to post anyways log.Errorf("checking sector faults: %v", err) From 12e97dbea743c02031ffbcd4f9800cc2d414e2f1 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sat, 26 Sep 2020 02:59:24 -0400 Subject: [PATCH 34/88] Fix docs and linter --- chain/stmgr/forks.go | 4 ---- documentation/en/api-methods.md | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 872c70b1e..252b731d7 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -53,10 +53,6 @@ func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, heig return retCid, nil } -type forEachTree interface { - ForEach(func(address.Address, *types.Actor) error) error -} - func doTransfer(cb ExecCallback, tree types.StateTree, from, to address.Address, amt abi.TokenAmount) error { fromAct, err := tree.GetActor(from) if err != nil { diff --git 
a/documentation/en/api-methods.md b/documentation/en/api-methods.md index e489fcb0f..ed082ccbf 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -3825,7 +3825,7 @@ Inputs: ] ``` -Response: `2` +Response: `3` ### StateReadState StateReadState returns the indicated actor's state. From 567261e2c7bc5b6cb764ab0ef3c9f1f30600448d Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Fri, 25 Sep 2020 23:59:59 -0700 Subject: [PATCH 35/88] set upgrade heights for testground builds --- build/params_testground.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/build/params_testground.go b/build/params_testground.go index 954b5ccfd..07cc88688 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -74,7 +74,9 @@ var ( UpgradeBreezeHeight abi.ChainEpoch = -1 BreezeGasTampingDuration abi.ChainEpoch = 0 - UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeLiftoffHeight abi.ChainEpoch = -3 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -82,4 +84,6 @@ var ( NewestNetworkVersion = network.Version2 ActorUpgradeNetworkVersion = network.Version3 + + Devnet = true ) From 45eadc1b3aeddf794b8f29186863e43e6e25034b Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sat, 26 Sep 2020 01:45:22 -0400 Subject: [PATCH 36/88] Lotus version 0.8.0 --- CHANGELOG.md | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ build/version.go | 2 +- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 16ced709b..ac687675e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,62 @@ # Lotus changelog +# 0.8.0 / 2020-09-26 + +This consensus-breaking release of Lotus introduces an upgrade to the network. The changes that break consensus are: + +- Upgrading to specs-actors v0.9.11, which reduces WindowPoSt faults per [FIP 0002](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0002.md) to reduce cost for honest miners with occasional faults (see https://github.com/filecoin-project/specs-actors/pull/1181) +- Revisions to some cryptoeconomics and network params + +This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions. 
+ +## Changes + +#### Dependencies + +- Update spec actors to 0.9.11 (https://github.com/filecoin-project/lotus/pull/4039) +- Update markets to 0.6.3 (https://github.com/filecoin-project/lotus/pull/4013) + +#### Core Lotus + +- Network upgrade (https://github.com/filecoin-project/lotus/pull/4039) +- Fix AddSupportedProofTypes (https://github.com/filecoin-project/lotus/pull/4033) +- Return an error when we fail to find a sector when checking sector expiration (https://github.com/filecoin-project/lotus/pull/4026) +- Batch blockstore copies after block validation (https://github.com/filecoin-project/lotus/pull/3980) +- Remove a misleading miner actor abstraction (https://github.com/filecoin-project/lotus/pull/3977) +- Fix out-of-bounds when loading all sector infos (https://github.com/filecoin-project/lotus/pull/3976) +- Fix break condition in the miner (https://github.com/filecoin-project/lotus/pull/3953) + +#### UX + +- Correct helptext around miners setting ask (https://github.com/filecoin-project/lotus/pull/4009) +- Make sync wait nicer (https://github.com/filecoin-project/lotus/pull/3991) + +#### Tooling and validation + +- Small adjustments following network upgradability changes (https://github.com/filecoin-project/lotus/pull/3996) +- Add some more big pictures stats to stateroot stat (https://github.com/filecoin-project/lotus/pull/3995) +- Add some actors policy setters for testing (https://github.com/filecoin-project/lotus/pull/3975) + +## Contributors + +The following contributors had 5 or more commits go into this release. +We are grateful for every contribution! + +| Contributor | Commits | Lines ± | +|--------------------|---------|---------------| +| arajasek | 66 | +3140/-1261 | +| Stebalien | 64 | +3797/-3434 | +| magik6k | 48 | +1892/-976 | +| raulk | 40 | +2412/-1549 | +| vyzo | 22 | +287/-196 | +| alanshaw | 15 | +761/-146 | +| whyrusleeping | 15 | +736/-52 | +| hannahhoward | 14 | +1237/837- | +| anton | 6 | +32/-8 | +| travisperson | 5 | +502/-6 | +| Frank | 5 | +78/-39 | +| Jennifer | 5 | +148/-41 | + # 0.7.2 / 2020-09-23 This optional release of Lotus introduces a major refactor around how a Lotus node interacts with code from the specs-actors repo. We now use interfaces to read the state of actors, which is required to be able to reason about different versions of actors code at the same time. 
diff --git a/build/version.go b/build/version.go index cfc8c3ab9..77b98f008 100644 --- a/build/version.go +++ b/build/version.go @@ -29,7 +29,7 @@ func buildType() string { } // BuildVersion is the local build version, set by build system -const BuildVersion = "0.7.2" +const BuildVersion = "0.8.0" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit From 81a30cbf062fb0e031dfdfc0826f61faeacc6b22 Mon Sep 17 00:00:00 2001 From: Steven Li Date: Sat, 26 Sep 2020 16:01:10 +0800 Subject: [PATCH 37/88] Add one more node located in China --- build/bootstrap/bootstrappers.pi | 1 + 1 file changed, 1 insertion(+) diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index 465f3b5e9..1c8b77709 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -4,3 +4,4 @@ /dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34 /dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T /dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W +/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C From bddd6dd8a8aaa89d38a5ccebc3b066224ca6b510 Mon Sep 17 00:00:00 2001 From: whyrusleeping Date: Sat, 26 Sep 2020 11:06:16 -0500 Subject: [PATCH 38/88] fix GetPower with no miner address --- chain/stmgr/utils.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 58e7f480f..bac5a31f5 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -102,6 +102,7 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres } var mpow power.Claim + var minpow bool if maddr != address.Undef { var found bool mpow, found, err = pas.MinerPower(maddr) @@ -109,11 +110,11 @@ func GetPowerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr addres // TODO: return an error when not found? 
return power.Claim{}, power.Claim{}, false, err } - } - minpow, err := pas.MinerNominalPowerMeetsConsensusMinimum(maddr) - if err != nil { - return power.Claim{}, power.Claim{}, false, err + minpow, err = pas.MinerNominalPowerMeetsConsensusMinimum(maddr) + if err != nil { + return power.Claim{}, power.Claim{}, false, err + } } return mpow, tpow, minpow, nil From 1c5cb50da340b19f287b211a40238e8782378cad Mon Sep 17 00:00:00 2001 From: Travis Person Date: Sat, 26 Sep 2020 17:09:16 +0000 Subject: [PATCH 39/88] Add back network power to stats --- tools/stats/metrics.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go index 22069c3d0..dd51ee69f 100644 --- a/tools/stats/metrics.go +++ b/tools/stats/metrics.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -252,6 +253,14 @@ func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointLis p := NewPoint("network.balance", netBalFilFloat) pl.AddPoint(p) + totalPower, err := api.StateMinerPower(ctx, address.Address{}, tipset.Key()) + if err != nil { + return err + } + + p = NewPoint("chain.power", totalPower.TotalPower.QualityAdjPower.Int64()) + pl.AddPoint(p) + miners, err := api.StateListMiners(ctx, tipset.Key()) if err != nil { return err From 8955b8d8a7ae3110acd82f072e9d739be7f73ac1 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sat, 26 Sep 2020 21:16:28 +0200 Subject: [PATCH 40/88] Centralize filtering, output wallet addresses --- cmd/lotus-shed/dealtracker.go | 186 ++++++++++++++++++---------------- 1 file changed, 97 insertions(+), 89 deletions(-) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index d39f51bd1..a21923009 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -5,8 +5,7 @@ import ( "encoding/json" "net" "net/http" - "os" - "strings" + "sync" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" @@ -19,23 +18,27 @@ type dealStatsServer struct { api api.FullNode } -var filteredClients map[address.Address]bool +// these lists grow continuously with the network +// TODO: need to switch this to an LRU of sorts, to ensure refreshes +var knownFiltered = new(sync.Map) +var resolvedWallets = new(sync.Map) func init() { - fc := []string{"t0112", "t0113", "t0114", "t010089"} - - filtered, set := os.LookupEnv("FILTERED_CLIENTS") - if set { - fc = strings.Split(filtered, ":") - } - - filteredClients = make(map[address.Address]bool) - for _, a := range fc { - addr, err := address.NewFromString(a) + for _, a := range []string{ + "t0100", // client for genesis miner + "t0112", // client for genesis miner + "t0113", // client for genesis miner + "t0114", // client for genesis miner + "t1nslxql4pck5pq7hddlzym3orxlx35wkepzjkm3i", // SR1 dealbot wallet + "t1stghxhdp2w53dym2nz2jtbpk6ccd4l2lxgmezlq", // SR1 dealbot wallet + "t1mcr5xkgv4jdl3rnz77outn6xbmygb55vdejgbfi", // SR1 dealbot wallet + "t1qiqdbbmrdalbntnuapriirduvxu5ltsc5mhy7si", // SR1 dealbot wallet + } { + a, err := address.NewFromString(a) if err != nil { panic(err) } - filteredClients[addr] = true + knownFiltered.Store(a, true) } } @@ -45,32 +48,16 @@ type dealCountResp struct { } func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) { - ctx := context.Background() - head, err := dss.api.ChainHead(ctx) - if err 
!= nil { - log.Warnf("failed to get chain head: %s", err) + epoch, deals := dss.filteredDealList() + if epoch == 0 { w.WriteHeader(500) return } - deals, err := dss.api.StateMarketDeals(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get market deals: %s", err) - w.WriteHeader(500) - return - } - - var count int64 - for _, d := range deals { - if !filteredClients[d.Proposal.Client] { - count++ - } - } - if err := json.NewEncoder(w).Encode(&dealCountResp{ - Total: count, - Epoch: int64(head.Height()), + Total: int64(len(deals)), + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal count response: %s", err) return @@ -83,34 +70,21 @@ type dealAverageResp struct { } func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) { - ctx := context.Background() - head, err := dss.api.ChainHead(ctx) - if err != nil { - log.Warnf("failed to get chain head: %s", err) + epoch, deals := dss.filteredDealList() + if epoch == 0 { w.WriteHeader(500) return } - deals, err := dss.api.StateMarketDeals(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get market deals: %s", err) - w.WriteHeader(500) - return - } - - var count int64 var totalBytes int64 for _, d := range deals { - if !filteredClients[d.Proposal.Client] { - count++ - totalBytes += int64(d.Proposal.PieceSize.Unpadded()) - } + totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded()) } if err := json.NewEncoder(w).Encode(&dealAverageResp{ - AverageSize: totalBytes / count, - Epoch: int64(head.Height()), + AverageSize: totalBytes / int64(len(deals)), + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal average response: %s", err) return @@ -123,32 +97,20 @@ type dealTotalResp struct { } func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) { - ctx := context.Background() - - head, err := dss.api.ChainHead(ctx) - if err != nil { - log.Warnf("failed to get chain head: %s", err) - w.WriteHeader(500) - return - } - - deals, err := dss.api.StateMarketDeals(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get market deals: %s", err) + epoch, deals := dss.filteredDealList() + if epoch == 0 { w.WriteHeader(500) return } var totalBytes int64 for _, d := range deals { - if !filteredClients[d.Proposal.Client] { - totalBytes += int64(d.Proposal.PieceSize.Unpadded()) - } + totalBytes += int64(d.deal.Proposal.PieceSize.Unpadded()) } if err := json.NewEncoder(w).Encode(&dealTotalResp{ TotalBytes: totalBytes, - Epoch: int64(head.Height()), + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal average response: %s", err) return @@ -168,18 +130,8 @@ type clientStatsOutput struct { } func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *http.Request) { - ctx := context.Background() - - head, err := dss.api.ChainHead(ctx) - if err != nil { - log.Warnf("failed to get chain head: %s", err) - w.WriteHeader(500) - return - } - - deals, err := dss.api.StateMarketDeals(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get market deals: %s", err) + epoch, deals := dss.filteredDealList() + if epoch == 0 { w.WriteHeader(500) return } @@ -187,23 +139,20 @@ func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *h stats := make(map[address.Address]*clientStatsOutput) for _, d := range deals { - if filteredClients[d.Proposal.Client] { - continue - } - st, ok := stats[d.Proposal.Client] + st, ok := stats[d.deal.Proposal.Client] if !ok { st = &clientStatsOutput{ - Client: 
d.Proposal.Client, + Client: d.resolvedWallet, cids: make(map[cid.Cid]bool), providers: make(map[address.Address]bool), } - stats[d.Proposal.Client] = st + stats[d.deal.Proposal.Client] = st } - st.DataSize += int64(d.Proposal.PieceSize.Unpadded()) - st.cids[d.Proposal.PieceCID] = true - st.providers[d.Proposal.Provider] = true + st.DataSize += int64(d.deal.Proposal.PieceSize.Unpadded()) + st.cids[d.deal.Proposal.PieceCID] = true + st.providers[d.deal.Proposal.Provider] = true st.NumDeals++ } @@ -221,6 +170,65 @@ func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *h } } +type dealInfo struct { + deal api.MarketDeal + resolvedWallet address.Address +} + +// filteredDealList returns the current epoch and a list of filtered deals +// on error returns an epoch of 0 +func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) { + ctx := context.Background() + + head, err := dss.api.ChainHead(ctx) + if err != nil { + log.Warnf("failed to get chain head: %s", err) + return 0, nil + } + + deals, err := dss.api.StateMarketDeals(ctx, head.Key()) + if err != nil { + log.Warnf("failed to get market deals: %s", err) + return 0, nil + } + + ret := make(map[string]dealInfo, len(deals)) + for dealKey, d := range deals { + + // Counting no-longer-active deals as per Pooja's request + // // https://github.com/filecoin-project/specs-actors/blob/v0.9.9/actors/builtin/market/deal.go#L81-L85 + // if d.State.SectorStartEpoch < 0 { + // continue + // } + + if _, isFiltered := knownFiltered.Load(d.Proposal.Client); isFiltered { + continue + } + + if _, wasSeen := resolvedWallets.Load(d.Proposal.Client); !wasSeen { + w, err := dss.api.StateAccountKey(ctx, d.Proposal.Client, head.Key()) + if err != nil { + log.Warnf("failed to resolve id '%s' to wallet address: %s", d.Proposal.Client, err) + continue + } else { + resolvedWallets.Store(d.Proposal.Client, w) + } + } + + w, _ := resolvedWallets.Load(d.Proposal.Client) + if _, isFiltered := knownFiltered.Load(w); isFiltered { + continue + } + + ret[dealKey] = dealInfo{ + deal: d, + resolvedWallet: w.(address.Address), + } + } + + return int64(head.Height()), ret +} + var serveDealStatsCmd = &cli.Command{ Name: "serve-deal-stats", Flags: []cli.Flag{}, From 10cdbadd82158cc36959877733ad008188b6aa14 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sat, 26 Sep 2020 21:29:11 +0200 Subject: [PATCH 41/88] Arrange json as the frontend expects it --- cmd/lotus-shed/dealtracker.go | 55 ++++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index a21923009..1340dc9c6 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -43,8 +43,9 @@ func init() { } type dealCountResp struct { - Total int64 `json:"total"` - Epoch int64 `json:"epoch"` + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload int64 `json:"payload"` } func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *http.Request) { @@ -56,8 +57,9 @@ func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *htt } if err := json.NewEncoder(w).Encode(&dealCountResp{ - Total: int64(len(deals)), - Epoch: epoch, + Endpoint: "COUNT_DEALS", + Payload: int64(len(deals)), + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal count response: %s", err) return @@ -65,8 +67,9 @@ func (dss *dealStatsServer) handleStorageDealCount(w http.ResponseWriter, r *htt } type dealAverageResp struct { - 
AverageSize int64 `json:"average_size"` - Epoch int64 `json:"epoch"` + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload int64 `json:"payload"` } func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, r *http.Request) { @@ -83,8 +86,9 @@ func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, } if err := json.NewEncoder(w).Encode(&dealAverageResp{ - AverageSize: totalBytes / int64(len(deals)), - Epoch: epoch, + Endpoint: "AVERAGE_DEAL_SIZE", + Payload: totalBytes / int64(len(deals)), + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal average response: %s", err) return @@ -92,8 +96,9 @@ func (dss *dealStatsServer) handleStorageDealAverageSize(w http.ResponseWriter, } type dealTotalResp struct { - TotalBytes int64 `json:"total_size"` - Epoch int64 `json:"epoch"` + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload int64 `json:"payload"` } func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r *http.Request) { @@ -109,8 +114,9 @@ func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r } if err := json.NewEncoder(w).Encode(&dealTotalResp{ - TotalBytes: totalBytes, - Epoch: epoch, + Endpoint: "DEAL_BYTES", + Payload: totalBytes, + Epoch: epoch, }); err != nil { log.Warnf("failed to write back deal average response: %s", err) return @@ -119,6 +125,12 @@ func (dss *dealStatsServer) handleStorageDealTotalReal(w http.ResponseWriter, r } type clientStatsOutput struct { + Epoch int64 `json:"epoch"` + Endpoint string `json:"endpoint"` + Payload []*clientStats `json:"payload"` +} + +type clientStats struct { Client address.Address `json:"client"` DataSize int64 `json:"data_size"` NumCids int `json:"num_cids"` @@ -136,13 +148,13 @@ func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *h return } - stats := make(map[address.Address]*clientStatsOutput) + stats := make(map[address.Address]*clientStats) for _, d := range deals { st, ok := stats[d.deal.Proposal.Client] if !ok { - st = &clientStatsOutput{ + st = &clientStats{ Client: d.resolvedWallet, cids: make(map[cid.Cid]bool), providers: make(map[address.Address]bool), @@ -156,12 +168,15 @@ func (dss *dealStatsServer) handleStorageClientStats(w http.ResponseWriter, r *h st.NumDeals++ } - out := make([]*clientStatsOutput, 0, len(stats)) - for _, cso := range stats { - cso.NumCids = len(cso.cids) - cso.NumMiners = len(cso.providers) - - out = append(out, cso) + out := clientStatsOutput{ + Epoch: epoch, + Endpoint: "CLIENT_DEAL_STATS", + Payload: make([]*clientStats, 0, len(stats)), + } + for _, cs := range stats { + cs.NumCids = len(cs.cids) + cs.NumMiners = len(cs.providers) + out.Payload = append(out.Payload, cs) } if err := json.NewEncoder(w).Encode(out); err != nil { From 1483f1e59adcbd91ab90ca01a3b8591e8f37c1fc Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sat, 26 Sep 2020 21:43:49 +0200 Subject: [PATCH 42/88] Add filtering of addresses associated with miners --- cmd/lotus-shed/dealtracker.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index 1340dc9c6..719e821a5 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -201,6 +201,26 @@ func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) { return 0, nil } + // Exclude any address associated with a miner + miners, err := dss.api.StateListMiners(ctx, head.Key()) + if err != nil 
{ + log.Warnf("failed to get miner list: %s", err) + return 0, nil + } + for _, m := range miners { + info, err := dss.api.StateMinerInfo(ctx, m, head.Key()) + if err != nil { + log.Warnf("failed to get info for known miner '%s': %s", m, err) + continue + } + + knownFiltered.Store(info.Owner, true) + knownFiltered.Store(info.Worker, true) + for _, a := range info.ControlAddresses { + knownFiltered.Store(a, true) + } + } + deals, err := dss.api.StateMarketDeals(ctx, head.Key()) if err != nil { log.Warnf("failed to get market deals: %s", err) From fb3bcc4ce527961fa172898a180d1fbb865fc133 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sat, 26 Sep 2020 21:49:05 +0200 Subject: [PATCH 43/88] Add startup warning --- cmd/lotus-shed/dealtracker.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index 719e821a5..8a9d0d6f3 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -303,6 +303,8 @@ var serveDealStatsCmd = &cli.Command{ panic(err) } + log.Warnf("deal-stat server listening on %s\n== NOTE: QUERIES ARE EXPENSIVE - YOU MUST FRONT-CACHE THIS SERVICE\n", list.Addr().String()) + return s.Serve(list) }, } From 70071e273d10f07010e70673f0d878fbb46f2262 Mon Sep 17 00:00:00 2001 From: zgfzgf <1901989065@qq.com> Date: Sun, 27 Sep 2020 09:58:37 +0800 Subject: [PATCH 44/88] optimize tipset Equals func --- chain/types/tipset.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chain/types/tipset.go b/chain/types/tipset.go index 44d41c29d..5d34ec89d 100644 --- a/chain/types/tipset.go +++ b/chain/types/tipset.go @@ -167,6 +167,10 @@ func (ts *TipSet) Equals(ots *TipSet) bool { return false } + if ts.height != ots.height { + return false + } + if len(ts.blks) != len(ots.blks) { return false } From 04876c663e16eb381adb6695f32db1c7f99493ab Mon Sep 17 00:00:00 2001 From: zgfzgf <1901989065@qq.com> Date: Sun, 27 Sep 2020 10:17:06 +0800 Subject: [PATCH 45/88] modify tipset Equals --- chain/types/tipset.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chain/types/tipset.go b/chain/types/tipset.go index 5d34ec89d..07eff3734 100644 --- a/chain/types/tipset.go +++ b/chain/types/tipset.go @@ -171,12 +171,12 @@ func (ts *TipSet) Equals(ots *TipSet) bool { return false } - if len(ts.blks) != len(ots.blks) { + if len(ts.cids) != len(ots.cids) { return false } - for i, b := range ts.blks { - if b.Cid() != ots.blks[i].Cid() { + for i, cid := range ts.cids { + if cid != ots.cids[i] { return false } } From 7a14455ac8253e0a9fd23fd358baf541efe3ca8e Mon Sep 17 00:00:00 2001 From: zgfzgf <1901989065@qq.com> Date: Sun, 27 Sep 2020 15:01:42 +0800 Subject: [PATCH 46/88] miner debug where injectNulls != 0 --- miner/miner.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/miner.go b/miner/miner.go index 1b79f5245..d4e7b2317 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -211,6 +211,8 @@ minerLoop: base = prebase } + base.NullRounds += injectNulls // testing + if base.TipSet.Equals(lastBase.TipSet) && lastBase.NullRounds == base.NullRounds { log.Warnf("BestMiningCandidate from the previous round: %s (nulls:%d)", lastBase.TipSet.Cids(), lastBase.NullRounds) if !m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second) { @@ -219,8 +221,6 @@ minerLoop: continue } - base.NullRounds += injectNulls // testing - b, err := m.mineOne(ctx, base) if err != nil { log.Errorf("mining block failed: %+v", err) From e4c1f090af73cc0e9f997b3de5e9a9443b493ad5 Mon Sep 17 00:00:00 2001 From: 
Peter Rabbitson Date: Sun, 27 Sep 2020 20:44:50 +0200 Subject: [PATCH 47/88] Disable exclusion of miner-associated addresses --- cmd/lotus-shed/dealtracker.go | 40 +++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index 8a9d0d6f3..083db8ecb 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -26,6 +26,8 @@ var resolvedWallets = new(sync.Map) func init() { for _, a := range []string{ "t0100", // client for genesis miner + "t0101", // client for genesis miner + "t0102", // client for genesis miner "t0112", // client for genesis miner "t0113", // client for genesis miner "t0114", // client for genesis miner @@ -201,25 +203,27 @@ func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) { return 0, nil } - // Exclude any address associated with a miner - miners, err := dss.api.StateListMiners(ctx, head.Key()) - if err != nil { - log.Warnf("failed to get miner list: %s", err) - return 0, nil - } - for _, m := range miners { - info, err := dss.api.StateMinerInfo(ctx, m, head.Key()) - if err != nil { - log.Warnf("failed to get info for known miner '%s': %s", m, err) - continue - } + // Disabled as per @pooja's request + // + // // Exclude any address associated with a miner + // miners, err := dss.api.StateListMiners(ctx, head.Key()) + // if err != nil { + // log.Warnf("failed to get miner list: %s", err) + // return 0, nil + // } + // for _, m := range miners { + // info, err := dss.api.StateMinerInfo(ctx, m, head.Key()) + // if err != nil { + // log.Warnf("failed to get info for known miner '%s': %s", m, err) + // continue + // } - knownFiltered.Store(info.Owner, true) - knownFiltered.Store(info.Worker, true) - for _, a := range info.ControlAddresses { - knownFiltered.Store(a, true) - } - } + // knownFiltered.Store(info.Owner, true) + // knownFiltered.Store(info.Worker, true) + // for _, a := range info.ControlAddresses { + // knownFiltered.Store(a, true) + // } + // } deals, err := dss.api.StateMarketDeals(ctx, head.Key()) if err != nil { From be5dc2c57fb8dfde934ddaa63b22d25dd5ca0356 Mon Sep 17 00:00:00 2001 From: Peter Rabbitson Date: Sun, 27 Sep 2020 20:45:45 +0200 Subject: [PATCH 48/88] Walk back 10 epochs for stat generation --- cmd/lotus-shed/dealtracker.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cmd/lotus-shed/dealtracker.go b/cmd/lotus-shed/dealtracker.go index 083db8ecb..8ded6bf4a 100644 --- a/cmd/lotus-shed/dealtracker.go +++ b/cmd/lotus-shed/dealtracker.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" lcli "github.com/filecoin-project/lotus/cli" "github.com/ipfs/go-cid" @@ -18,6 +19,10 @@ type dealStatsServer struct { api api.FullNode } +// Requested by @jbenet +// How many epochs back to look at for dealstats +var epochLookback = abi.ChainEpoch(10) + // these lists grow continuously with the network // TODO: need to switch this to an LRU of sorts, to ensure refreshes var knownFiltered = new(sync.Map) @@ -203,6 +208,12 @@ func (dss *dealStatsServer) filteredDealList() (int64, map[string]dealInfo) { return 0, nil } + head, err = dss.api.ChainGetTipSetByHeight(ctx, head.Height()-epochLookback, head.Key()) + if err != nil { + log.Warnf("failed to walk back %s epochs: %s", epochLookback, err) + return 0, nil + } + // Disabled as per @pooja's request // // // Exclude any address associated with 
a miner From a2d24b5b33a18418edc63bea9518e9dbed72bfef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=9E=97=E6=AC=A3?= Date: Mon, 28 Sep 2020 13:53:29 +0800 Subject: [PATCH 49/88] add ipfsmain bootstrapper --- build/bootstrap/bootstrappers.pi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build/bootstrap/bootstrappers.pi b/build/bootstrap/bootstrappers.pi index 465f3b5e9..51adb3cc7 100644 --- a/build/bootstrap/bootstrappers.pi +++ b/build/bootstrap/bootstrappers.pi @@ -4,3 +4,6 @@ /dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34 /dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T /dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W + +/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d +/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP \ No newline at end of file From a3145bae07f3658183d95d9a6e56d2f3ae38ce76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 28 Sep 2020 12:17:54 +0200 Subject: [PATCH 50/88] daemon cmd: Add progress bar to chain import --- cmd/lotus/daemon.go | 18 +++++++++++++++++- go.mod | 1 + 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index e0fee6564..93f6e4b8c 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -23,6 +23,7 @@ import ( "go.opencensus.io/stats/view" "go.opencensus.io/tag" "golang.org/x/xerrors" + "gopkg.in/cheggaaa/pb.v1" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -333,6 +334,11 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) error { } defer fi.Close() //nolint:errcheck + st, err := os.Stat(fname) + if err != nil { + return err + } + lr, err := r.Lock(repo.FullNode) if err != nil { return err @@ -354,7 +360,17 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) error { cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier)) log.Info("importing chain from file...") - ts, err := cst.Import(fi) + + bar := pb.New64(st.Size()) + br := bar.NewProxyReader(fi) + bar.ShowTimeLeft = true + bar.ShowPercent = true + bar.Units = pb.U_BYTES + + bar.Start() + ts, err := cst.Import(br) + bar.Finish() + if err != nil { return xerrors.Errorf("importing chain failed: %w", err) } diff --git a/go.mod b/go.mod index 67d87347f..2c0322ecc 100644 --- a/go.mod +++ b/go.mod @@ -125,6 +125,7 @@ require ( golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 + gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect ) From f8095296997f6d5d8ba05d66acf291cfa048ae50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Mon, 28 Sep 2020 12:28:12 +0200 Subject: [PATCH 51/88] daemon cmd: Support imports straight from http --- cmd/lotus/daemon.go | 64 +++++++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 19 deletions(-) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 93f6e4b8c..b976fde79 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -3,11 +3,14 @@ package main import ( + "bufio" "context" "encoding/hex" "encoding/json" "fmt" + "io" "io/ioutil" + "net/http" "os" "runtime/pprof" "strings" @@ 
-101,11 +104,11 @@ var DaemonCmd = &cli.Command{ }, &cli.StringFlag{ Name: "import-chain", - Usage: "on first run, load chain from given file and validate", + Usage: "on first run, load chain from given file or url and validate", }, &cli.StringFlag{ Name: "import-snapshot", - Usage: "import chain state from a given chain export file", + Usage: "import chain state from a given chain export file or url", }, &cli.BoolFlag{ Name: "halt-after-import", @@ -207,11 +210,6 @@ var DaemonCmd = &cli.Command{ issnapshot = true } - chainfile, err := homedir.Expand(chainfile) - if err != nil { - return err - } - if err := ImportChain(r, chainfile, issnapshot); err != nil { return err } @@ -327,16 +325,41 @@ func importKey(ctx context.Context, api api.FullNode, f string) error { return nil } -func ImportChain(r repo.Repo, fname string, snapshot bool) error { - fi, err := os.Open(fname) - if err != nil { - return err - } - defer fi.Close() //nolint:errcheck +func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) { + var rd io.Reader + var l int64 + if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") { + resp, err := http.Get(fname) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() //nolint:errcheck - st, err := os.Stat(fname) - if err != nil { - return err + if resp.StatusCode != http.StatusOK { + return xerrors.Errorf("non-200 response: %d", resp.StatusCode) + } + + rd = resp.Body + l = resp.ContentLength + } else { + fname, err = homedir.Expand(fname) + if err != nil { + return err + } + + fi, err := os.Open(fname) + if err != nil { + return err + } + defer fi.Close() //nolint:errcheck + + st, err := os.Stat(fname) + if err != nil { + return err + } + + rd = fi + l = st.Size() } lr, err := r.Lock(repo.FullNode) @@ -359,12 +382,15 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) error { cst := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier)) - log.Info("importing chain from file...") + log.Infof("importing chain from %s...", fname) - bar := pb.New64(st.Size()) - br := bar.NewProxyReader(fi) + bufr := bufio.NewReaderSize(rd, 1<<20) + + bar := pb.New64(l) + br := bar.NewProxyReader(bufr) bar.ShowTimeLeft = true bar.ShowPercent = true + bar.ShowSpeed = true bar.Units = pb.U_BYTES bar.Start() From 6b16d48bad56544d547449e3df8972cf5feb72ee Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 28 Sep 2020 15:56:44 +0200 Subject: [PATCH 52/88] refactor: fetch tipset blocks in parallel --- chain/store/store.go | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/chain/store/store.go b/chain/store/store.go index 1dbf69547..e68857e0b 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -10,6 +10,8 @@ import ( "strconv" "sync" + "golang.org/x/sync/errgroup" + "github.com/filecoin-project/go-state-types/crypto" "github.com/minio/blake2b-simd" @@ -467,14 +469,25 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { return v.(*types.TipSet), nil } - var blks []*types.BlockHeader - for _, c := range tsk.Cids() { - b, err := cs.GetBlock(c) - if err != nil { - return nil, xerrors.Errorf("get block %s: %w", c, err) - } + // Fetch tipset block headers from blockstore in parallel + var eg errgroup.Group + cids := tsk.Cids() + blks := make([]*types.BlockHeader, 0, len(cids)) + for _, c := range cids { + c := c + eg.Go(func() error { + b, err := cs.GetBlock(c) + if err != nil { + return xerrors.Errorf("get block %s: %w", c, err) + } - blks = 
append(blks, b) + blks = append(blks, b) + return nil + }) + } + err := eg.Wait() + if err != nil { + return nil, err } ts, err := types.NewTipSet(blks) From cfe6f595b036c332843697413fce5b78104159af Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Mon, 28 Sep 2020 16:35:37 +0200 Subject: [PATCH 53/88] fix: unsafe append in LoadTipSet --- chain/store/store.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/chain/store/store.go b/chain/store/store.go index e68857e0b..6c93db7a0 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -472,16 +472,16 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { // Fetch tipset block headers from blockstore in parallel var eg errgroup.Group cids := tsk.Cids() - blks := make([]*types.BlockHeader, 0, len(cids)) - for _, c := range cids { - c := c + blks := make([]*types.BlockHeader, len(cids)) + for i, c := range cids { + i, c := i, c eg.Go(func() error { b, err := cs.GetBlock(c) if err != nil { return xerrors.Errorf("get block %s: %w", c, err) } - blks = append(blks, b) + blks[i] = b return nil }) } From 64f24fd276d0f964b78ece61fd2be6cdbb674b1f Mon Sep 17 00:00:00 2001 From: jennijuju Date: Mon, 28 Sep 2020 15:34:06 -0400 Subject: [PATCH 54/88] Added an option to hide sector info for `lotus-miner info` --- cmd/lotus-storage-miner/info.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmd/lotus-storage-miner/info.go b/cmd/lotus-storage-miner/info.go index 3ccfd67da..213d62e6e 100644 --- a/cmd/lotus-storage-miner/info.go +++ b/cmd/lotus-storage-miner/info.go @@ -33,6 +33,12 @@ var infoCmd = &cli.Command{ Subcommands: []*cli.Command{ infoAllCmd, }, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "hide-sectors-info", + Usage: "hide sectors info", + }, + }, Action: infoCmdAct, } @@ -199,10 +205,12 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf("Expected Seal Duration: %s\n\n", sealdur) - fmt.Println("Sectors:") - err = sectorsInfo(ctx, nodeApi) - if err != nil { - return err + if !cctx.Bool("hide-sectors-info") { + fmt.Println("Sectors:") + err = sectorsInfo(ctx, nodeApi) + if err != nil { + return err + } } // TODO: grab actr state / info From eaece306b6c16efe642160f52d5e73a6a687d976 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 29 Sep 2020 11:17:23 +0200 Subject: [PATCH 55/88] wallet list cli: Print balances/nonces --- cli/wallet.go | 69 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 7 deletions(-) diff --git a/cli/wallet.go b/cli/wallet.go index 27993a1ba..0d69673f9 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -9,13 +9,16 @@ import ( "os" "strings" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/crypto" - types "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" + "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/urfave/cli/v2" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" + + types "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/lib/tablewriter" ) var walletCmd = &cli.Command{ @@ -66,6 +69,13 @@ var walletNew = &cli.Command{ var walletList = &cli.Command{ Name: "list", Usage: "List wallet address", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "addr-only", + Usage: "Only print addresses", 
+ Aliases: []string{"a"}, + }, + }, Action: func(cctx *cli.Context) error { api, closer, err := GetFullNodeAPI(cctx) if err != nil { @@ -79,9 +89,54 @@ var walletList = &cli.Command{ return err } - for _, addr := range addrs { - fmt.Println(addr.String()) + def, err := api.WalletDefaultAddress(ctx) + if err != nil { + return err } + + tw := tablewriter.New( + tablewriter.Col("Address"), + tablewriter.Col("Balance"), + tablewriter.Col("Nonce"), + tablewriter.Col("Default"), + tablewriter.NewLineCol("Error")) + + for _, addr := range addrs { + if cctx.Bool("addr-only") { + fmt.Println(addr.String()) + } else { + a, err := api.StateGetActor(ctx, addr, types.EmptyTSK) + if err != nil { + if !strings.Contains(err.Error(), "actor not found") { + tw.Write(map[string]interface{}{ + "Address": addr, + "Error": err, + }) + continue + } + + a = &types.Actor{ + Balance: big.Zero(), + } + } + + row := map[string]interface{}{ + "Address": addr, + "Balance": types.FIL(a.Balance), + "Nonce": a.Nonce, + } + if addr == def { + row["Default"] = "X" + } + + tw.Write(row) + } + } + + if !cctx.Bool("addr-only") { + return tw.Flush(os.Stdout) + } + return nil }, } From d1c10a61dd61a38a90c7d49dcd1cfeb74318eba4 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Tue, 29 Sep 2020 12:19:04 +0200 Subject: [PATCH 56/88] fix: message signer - always compare with mpool nonce --- chain/messagesigner/messagesigner.go | 44 +++++++++++++---------- chain/messagesigner/messagesigner_test.go | 5 ++- node/builder.go | 3 +- node/impl/full/mpool.go | 3 +- 4 files changed, 30 insertions(+), 25 deletions(-) diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index 41b0edee9..1ad83543b 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -4,21 +4,22 @@ import ( "bytes" "context" - "github.com/filecoin-project/lotus/chain/wallet" - - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" ) const dsKeyActorNonce = "ActorNonce" +var log = logging.Logger("messagesigner") + type mpoolAPI interface { GetNonce(address.Address) (uint64, error) } @@ -67,30 +68,30 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message) (* // nextNonce increments the nonce. // If there is no nonce in the datastore, gets the nonce from the message pool. func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) { - addrNonceKey := datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()}) + // Nonces used to be created by the mempool and we need to support nodes + // that have mempool nonces, so first check the mempool for a nonce for + // this address. Note that the mempool returns the actor state's nonce + // by default. 
+ nonce, err := ms.mpool.GetNonce(addr) + if err != nil { + return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err) + } // Get the nonce for this address from the datastore - nonceBytes, err := ms.ds.Get(addrNonceKey) + addrNonceKey := datastore.KeyWithNamespaces([]string{dsKeyActorNonce, addr.String()}) + dsNonceBytes, err := ms.ds.Get(addrNonceKey) - var nonce uint64 switch { case xerrors.Is(err, datastore.ErrNotFound): // If a nonce for this address hasn't yet been created in the - // datastore, check the mempool - nonces used to be created by - // the mempool so we need to support nodes that still have mempool - // nonces. Note that the mempool returns the actor state's nonce by - // default. - nonce, err = ms.mpool.GetNonce(addr) - if err != nil { - return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err) - } + // datastore, just use the nonce from the mempool case err != nil: return 0, xerrors.Errorf("failed to get nonce from datastore: %w", err) default: - // There is a nonce in the mempool, so unmarshall and increment it - maj, val, err := cbg.CborReadHeader(bytes.NewReader(nonceBytes)) + // There is a nonce in the datastore, so unmarshall and increment it + maj, val, err := cbg.CborReadHeader(bytes.NewReader(dsNonceBytes)) if err != nil { return 0, xerrors.Errorf("failed to parse nonce from datastore: %w", err) } @@ -98,7 +99,14 @@ func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) { return 0, xerrors.Errorf("bad cbor type parsing nonce from datastore") } - nonce = val + 1 + dsNonce := val + 1 + + // The message pool nonce should be <= than the datastore nonce + if nonce <= dsNonce { + nonce = dsNonce + } else { + log.Warnf("mempool nonce was larger than datastore nonce (%d > %d)", nonce, dsNonce) + } } // Write the nonce for this address to the datastore diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index e52137892..55676b258 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -98,10 +98,9 @@ func TestMessageSignerSignMessage(t *testing.T) { To: to1, From: from1, }, - // Should ignore mpool nonce because after the first message nonce - // will come from the datastore + // Should adjust datastore nonce because mpool nonce is higher mpoolNonce: [1]uint64{10}, - expNonce: 6, + expNonce: 10, }}, }, { // Nonce should increment independently for each address diff --git a/node/builder.go b/node/builder.go index c49789a6a..da2924338 100644 --- a/node/builder.go +++ b/node/builder.go @@ -6,8 +6,6 @@ import ( "os" "time" - "github.com/filecoin-project/lotus/chain/messagesigner" - logging "github.com/ipfs/go-log" ci "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/host" @@ -37,6 +35,7 @@ import ( "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/messagesigner" "github.com/filecoin-project/lotus/chain/metrics" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index 003260496..066aafdc5 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -4,14 +4,13 @@ import ( "context" "encoding/json" - "github.com/filecoin-project/lotus/chain/messagesigner" - "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "go.uber.org/fx" 
"golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/messagesigner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" ) From 09e5cc90a40791b3139e5eac8912532246ca8058 Mon Sep 17 00:00:00 2001 From: Hector Sanjuan Date: Tue, 29 Sep 2020 14:45:55 +0200 Subject: [PATCH 57/88] Add README to documentation/en with explanations --- documentation/en/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 documentation/en/README.md diff --git a/documentation/en/README.md b/documentation/en/README.md new file mode 100644 index 000000000..76f11ed90 --- /dev/null +++ b/documentation/en/README.md @@ -0,0 +1,16 @@ +# Lotus documentation + +This folder contains some Lotus documentation mostly intended for Lotus developers. + +User documentation (including documentation for miners) has been moved to specific Lotus sections in https://docs.filecoin.io: + +- https://docs.filecoin.io/get-started/lotus +- https://docs.filecoin.io/store/lotus +- https://docs.filecoin.io/mine/lotus +- https://docs.filecoin.io/build/lotus + +## The Lotu.sh site + +The https://lotu.sh and https://docs.lotu.sh sites are generated from this folder based on the index provided by [.library.json](.library.json). This is done at the [lotus-docs repository](https://github.com/filecoin-project/lotus-docs), which contains Lotus as a git submodule. + +To update the site, the lotus-docs repository should be updated with the desired version for the lotus git submodule. Once pushed to master, it will be auto-deployed. From 96193c20448a530e3f0c542700fb78fa928bc880 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Wed, 16 Sep 2020 16:34:54 +0200 Subject: [PATCH 58/88] Implement bench-cache Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/caching_verifier.go | 33 +++++++++++++++ cmd/lotus-bench/import.go | 63 +++++++++++++++++++++-------- 2 files changed, 80 insertions(+), 16 deletions(-) create mode 100644 cmd/lotus-bench/caching_verifier.go diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go new file mode 100644 index 000000000..cd794e647 --- /dev/null +++ b/cmd/lotus-bench/caching_verifier.go @@ -0,0 +1,33 @@ +package main + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/ipfs/go-datastore" +) + +type cachingVerifier struct { + ds datastore.Datastore + backend ffiwrapper.Verifier +} + +func (cv *cachingVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { + svi.MarshalCBOR(nil) + return cv.backend.VerifySeal(svi) +} +func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { + info.MarshalCBOR(nil) + return cv.backend.VerifyWinningPoSt(ctx, info) +} +func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { + info.MarshalCBOR(nil) + return cv.backend.VerifyWindowPoSt(ctx, info) +} +func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, a abi.ActorID, rnd abi.PoStRandomness, u uint64) ([]uint64, error) { + return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) +} + +var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 
f2845ba20..fc81c600e 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -60,6 +60,15 @@ var importBenchCmd = &cli.Command{ Name: "repodir", Usage: "set the repo directory for the lotus bench run (defaults to /tmp)", }, + &cli.StringFlag{ + Name: "syscall-cache", + Usage: "read and write syscall results from datastore", + }, + &cli.BoolFlag{ + Name: "export-traces", + Usage: "should we export execution traces", + Value: true, + }, }, Action: func(cctx *cli.Context) error { vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") @@ -85,7 +94,10 @@ var importBenchCmd = &cli.Command{ tdir = tmp } - bds, err := badger.NewDatastore(tdir, nil) + bdgOpt := badger.DefaultOptions + bdgOpt.GcInterval = 0 + + bds, err := badger.NewDatastore(tdir, &bdgOpt) if err != nil { return err } @@ -96,7 +108,21 @@ var importBenchCmd = &cli.Command{ } bs = cbs ds := datastore.NewMapDatastore() - cs := store.NewChainStore(bs, ds, vm.Syscalls(ffiwrapper.ProofVerifier)) + + var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier + if cctx.IsSet("syscall-cache") { + + scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt) + if err != nil { + return xerrors.Errorf("opening syscall-cache datastore: %w", err) + } + verifier = &cachingVerifier{ + ds: scds, + backend: verifier, + } + } + + cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier)) stm := stmgr.NewStateManager(cs) prof, err := os.Create("import-bench.prof") @@ -144,13 +170,16 @@ var importBenchCmd = &cli.Command{ ts = next } - ibj, err := os.Create("import-bench.json") - if err != nil { - return err - } - defer ibj.Close() //nolint:errcheck + var enc *json.Encoder + if cctx.Bool("export-traces") { + ibj, err := os.Create("import-bench.json") + if err != nil { + return err + } + defer ibj.Close() //nolint:errcheck - enc := json.NewEncoder(ibj) + enc = json.NewEncoder(ibj) + } var lastTse *TipSetExec @@ -173,17 +202,19 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } - stripCallers(trace) + if enc != nil { + stripCallers(trace) - lastTse = &TipSetExec{ - TipSet: cur.Key(), - Trace: trace, - Duration: time.Since(start), + lastTse = &TipSetExec{ + TipSet: cur.Key(), + Trace: trace, + Duration: time.Since(start), + } + if err := enc.Encode(lastTse); err != nil { + return xerrors.Errorf("failed to write out tipsetexec: %w", err) + } } lastState = st - if err := enc.Encode(lastTse); err != nil { - return xerrors.Errorf("failed to write out tipsetexec: %w", err) - } } pprof.StopCPUProfile() From 79ba4598d6f85bdc3e95faeb0e394af61e5e0666 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Wed, 16 Sep 2020 17:54:22 +0200 Subject: [PATCH 59/88] Implement cache Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/caching_verifier.go | 75 +++++++++++++++++++++++++++-- 1 file changed, 70 insertions(+), 5 deletions(-) diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index cd794e647..28897071a 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -1,12 +1,16 @@ package main import ( + "bufio" "context" + "errors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/specs-actors/actors/runtime/proof" "github.com/ipfs/go-datastore" + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" ) type cachingVerifier struct { @@ -14,17 +18,78 @@ type cachingVerifier struct { backend ffiwrapper.Verifier } +const bufsize = 
128 + +func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBORMarshaler) (bool, error) { + hasher := blake2b.New256() + wr := bufio.NewWriterSize(hasher, bufsize) + err := param.MarshalCBOR(wr) + if err != nil { + log.Errorf("could not marshal call info: %+v", err) + return execute() + } + err = wr.Flush() + if err != nil { + log.Errorf("could not flush: %+v", err) + return execute() + } + hash := hasher.Sum(nil) + key := datastore.NewKey(string(hash)) + fromDs, err := cv.ds.Get(key) + if err == nil { + switch fromDs[0] { + case 's': + return true, nil + case 'f': + return false, nil + case 'e': + return false, errors.New(string(fromDs[1:])) + default: + log.Errorf("bad cached result in cache %s(%x)", fromDs[0], fromDs[0]) + return execute() + } + } else if errors.Is(err, datastore.ErrNotFound) { + // recalc + ok, err := execute() + var save []byte + if err != nil { + if ok { + log.Errorf("sucess with an error: %+v", err) + } else { + save = append([]byte{'e'}, []byte(err.Error())...) + } + } else if ok { + save = []byte{'s'} + } else { + save = []byte{'f'} + } + + if len(save) != 0 { + errSave := cv.ds.Put(key, save) + if errSave != nil { + log.Errorf("error saving result: %+v", errSave) + } + } + + return ok, err + } else { + log.Errorf("could not get data from cache: %+v", err) + return execute() + } +} + func (cv *cachingVerifier) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { - svi.MarshalCBOR(nil) - return cv.backend.VerifySeal(svi) + return cv.withCache(func() (bool, error) { + return cv.backend.VerifySeal(svi) + }, &svi) } func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { - info.MarshalCBOR(nil) return cv.backend.VerifyWinningPoSt(ctx, info) } func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { - info.MarshalCBOR(nil) - return cv.backend.VerifyWindowPoSt(ctx, info) + return cv.withCache(func() (bool, error) { + return cv.backend.VerifyWindowPoSt(ctx, info) + }, &info) } func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, a abi.ActorID, rnd abi.PoStRandomness, u uint64) ([]uint64, error) { return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) From 53ab17cf50f5d7a69b81a6945bbe317a747bd10d Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Wed, 16 Sep 2020 20:10:00 +0200 Subject: [PATCH 60/88] Add no import to import-bench Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index fc81c600e..c16796a13 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/lib/blockstore" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/ipld/go-car" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -69,6 +70,10 @@ var importBenchCmd = &cli.Command{ Usage: "should we export execution traces", Value: true, }, + &cli.BoolFlag{ + Name: "no-import", + Usage: "should we import the chain? 
if set to true chain has to be previously imported", + }, }, Action: func(cctx *cli.Context) error { vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") @@ -111,7 +116,6 @@ var importBenchCmd = &cli.Command{ var verifier ffiwrapper.Verifier = ffiwrapper.ProofVerifier if cctx.IsSet("syscall-cache") { - scds, err := badger.NewDatastore(cctx.String("syscall-cache"), &bdgOpt) if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) @@ -135,9 +139,21 @@ var importBenchCmd = &cli.Command{ return err } - head, err := cs.Import(cfi) - if err != nil { - return err + var head *types.TipSet + if !cctx.Bool("no-import") { + head, err = cs.Import(cfi) + if err != nil { + return err + } + } else { + cr, err := car.NewCarReader(cfi) + if err != nil { + return err + } + head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) + if err != nil { + return err + } } gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true) @@ -188,6 +204,7 @@ var importBenchCmd = &cli.Command{ cur := tschain[i] log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) if cur.ParentState() != lastState { + stripCallers(lastTse.Trace) lastTrace := lastTse.Trace d, err := json.MarshalIndent(lastTrace, "", " ") if err != nil { @@ -202,14 +219,14 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } + lastTse = &TipSetExec{ + TipSet: cur.Key(), + Trace: trace, + Duration: time.Since(start), + } if enc != nil { - stripCallers(trace) + stripCallers(lastTse.Trace) - lastTse = &TipSetExec{ - TipSet: cur.Key(), - Trace: trace, - Duration: time.Since(start), - } if err := enc.Encode(lastTse); err != nil { return xerrors.Errorf("failed to write out tipsetexec: %w", err) } From 1f4d1dcc58fc2da3574c479159775e4b19b1a720 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Wed, 16 Sep 2020 20:38:28 +0200 Subject: [PATCH 61/88] Do not sync Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c16796a13..443e67950 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -101,6 +101,8 @@ var importBenchCmd = &cli.Command{ bdgOpt := badger.DefaultOptions bdgOpt.GcInterval = 0 + bdgOpt.Options.SyncWrites = false + bdgOpt.Options.Truncate = true bds, err := badger.NewDatastore(tdir, &bdgOpt) if err != nil { From 12a0dd3d0a0a6b80c1e11c2e05d39b5024d55139 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Thu, 17 Sep 2020 00:06:20 +0200 Subject: [PATCH 62/88] <3 to linter Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/caching_verifier.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 28897071a..51ab696f7 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -54,7 +54,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR var save []byte if err != nil { if ok { - log.Errorf("sucess with an error: %+v", err) + log.Errorf("success with an error: %+v", err) } else { save = append([]byte{'e'}, []byte(err.Error())...) 
} From 108fe7823c9dadf3d7e3812d79a66f4e6769ef5e Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 18 Sep 2020 13:39:38 +0200 Subject: [PATCH 63/88] Add command to trigger gc Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 443e67950..72ac5d60b 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -74,6 +74,9 @@ var importBenchCmd = &cli.Command{ Name: "no-import", Usage: "should we import the chain? if set to true chain has to be previously imported", }, + &cli.BoolFlag{ + Name: "only-gc", + }, }, Action: func(cctx *cli.Context) error { vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") @@ -103,11 +106,15 @@ var importBenchCmd = &cli.Command{ bdgOpt.GcInterval = 0 bdgOpt.Options.SyncWrites = false bdgOpt.Options.Truncate = true + bdgOpt.Options.DetectConflicts = false + bdgOpt.Options.MaxTableSize = 64 << 20 bds, err := badger.NewDatastore(tdir, &bdgOpt) if err != nil { return err } + + bds.CollectGarbage() bs := blockstore.NewBlockstore(bds) cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts()) if err != nil { @@ -122,11 +129,15 @@ var importBenchCmd = &cli.Command{ if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } + scds.CollectGarbage() verifier = &cachingVerifier{ ds: scds, backend: verifier, } } + if cctx.Bool("only-gc") { + return nil + } cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier)) stm := stmgr.NewStateManager(cs) From 782717948ae9926e4afa7a42f758e8c70abe13d8 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 18 Sep 2020 13:54:20 +0200 Subject: [PATCH 64/88] Add logs Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 72ac5d60b..c6d49a78c 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -114,7 +114,11 @@ var importBenchCmd = &cli.Command{ return err } - bds.CollectGarbage() + if cctx.Bool("only-gc") { + log.Info("calling CollectGarbage on main ds") + bds.CollectGarbage() + log.Info("done calling CollectGarbage on main ds") + } bs := blockstore.NewBlockstore(bds) cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts()) if err != nil { @@ -129,7 +133,12 @@ var importBenchCmd = &cli.Command{ if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } - scds.CollectGarbage() + + if cctx.Bool("only-gc") { + log.Info("calling CollectGarbage on syscall ds") + scds.CollectGarbage() + log.Info("done calling CollectGarbage on syscall ds") + } verifier = &cachingVerifier{ ds: scds, backend: verifier, From 3858309368bfa069f86f71b3558ef4532d6f422f Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 18 Sep 2020 13:55:37 +0200 Subject: [PATCH 65/88] Add http to import bench Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c6d49a78c..8c874206f 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -91,6 +91,10 @@ var importBenchCmd = &cli.Command{ } defer cfi.Close() //nolint:errcheck // read only file + go func() { + http.ListenAndServe("localhost:6060", nil) //nolint:errcheck + }() + var tdir string if rdir := cctx.String("repodir"); rdir != "" { tdir = 
rdir From 01386a206c0c8c9d9007d35a80307cae93b0781b Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Fri, 18 Sep 2020 15:17:13 +0200 Subject: [PATCH 66/88] Update options Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 8c874206f..db87ebeb6 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -29,8 +29,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + bdg "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" + "github.com/urfave/cli/v2" "golang.org/x/xerrors" ) @@ -108,10 +110,10 @@ var importBenchCmd = &cli.Command{ bdgOpt := badger.DefaultOptions bdgOpt.GcInterval = 0 + bdgOpt.Options = bdg.DefaultOptions("") bdgOpt.Options.SyncWrites = false bdgOpt.Options.Truncate = true bdgOpt.Options.DetectConflicts = false - bdgOpt.Options.MaxTableSize = 64 << 20 bds, err := badger.NewDatastore(tdir, &bdgOpt) if err != nil { From f21c5cbbe28fe382c1670298e11819cf4949e7af Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 19 Sep 2020 19:11:37 +0200 Subject: [PATCH 67/88] Add start-at Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 69 ++++++++++++++++++++++++--------------- go.mod | 9 ++--- go.sum | 31 ++++++++++++++++++ 3 files changed, 78 insertions(+), 31 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index db87ebeb6..3c5bd0cda 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -28,6 +28,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/statediff" bdg "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" @@ -79,6 +80,9 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-gc", }, + &cli.Int64Flag{ + Name: "start-at", + }, }, Action: func(cctx *cli.Context) error { vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") @@ -194,6 +198,20 @@ var importBenchCmd = &cli.Command{ return err } + startEpoch := abi.ChainEpoch(1) + if cctx.IsSet("start-at") { + startEpoch = abi.ChainEpoch(cctx.Int64("start-at")) + start, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(cctx.Int64("start-at")), head, true) + if err != nil { + return err + } + + err = cs.SetHead(start) + if err != nil { + return err + } + } + if h := cctx.Int64("height"); h != 0 { tsh, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) if err != nil { @@ -204,7 +222,7 @@ var importBenchCmd = &cli.Command{ ts := head tschain := []*types.TipSet{ts} - for ts.Height() != 0 { + for ts.Height() > startEpoch { next, err := cs.LoadTipSet(ts.Parents()) if err != nil { return err @@ -225,41 +243,38 @@ var importBenchCmd = &cli.Command{ enc = json.NewEncoder(ibj) } - var lastTse *TipSetExec - - lastState := tschain[len(tschain)-1].ParentState() - for i := len(tschain) - 2; i >= 0; i-- { + for i := len(tschain) - 1; i >= 1; i-- { cur := tschain[i] + start := time.Now() log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) - if cur.ParentState() != lastState { - stripCallers(lastTse.Trace) - lastTrace := lastTse.Trace + st, trace, err := stm.ExecutionTrace(context.TODO(), cur) + if err != nil { + return err + } + tse := &TipSetExec{ + TipSet: cur.Key(), + Trace: trace, + Duration: 
time.Since(start), + } + if enc != nil { + stripCallers(tse.Trace) + + if err := enc.Encode(tse); err != nil { + return xerrors.Errorf("failed to write out tipsetexec: %w", err) + } + } + if tschain[i-1].ParentState() != st { + stripCallers(tse.Trace) + lastTrace := tse.Trace d, err := json.MarshalIndent(lastTrace, "", " ") if err != nil { panic(err) } fmt.Println("TRACE") fmt.Println(string(d)) - return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), lastState) + fmt.Println(statediff.Diff(context.Background(), bs, tschain[i-1].ParentState(), st, statediff.ExpandActors)) + return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), st) } - start := time.Now() - st, trace, err := stm.ExecutionTrace(context.TODO(), cur) - if err != nil { - return err - } - lastTse = &TipSetExec{ - TipSet: cur.Key(), - Trace: trace, - Duration: time.Since(start), - } - if enc != nil { - stripCallers(lastTse.Trace) - - if err := enc.Encode(lastTse); err != nil { - return xerrors.Errorf("failed to write out tipsetexec: %w", err) - } - } - lastState = st } pprof.StopCPUProfile() diff --git a/go.mod b/go.mod index 2c0322ecc..83137d7c8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/GeertJohan/go.rice v1.0.0 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee - github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 github.com/coreos/go-systemd/v22 v22.0.0 @@ -38,10 +37,10 @@ require ( github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/specs-actors v0.9.11 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 + github.com/filecoin-project/statediff v0.0.6-0.20200918150628-da86dd0d264c github.com/filecoin-project/test-vectors/schema v0.0.1 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/go-kit/kit v0.10.0 - github.com/go-ole/go-ole v1.2.4 // indirect github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 @@ -117,7 +116,6 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.22.4 - go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.15.0 @@ -127,9 +125,12 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible - launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect ) +replace github.com/filecoin-project/lotus => ./ + +replace github.com/filecoin-project/statediff => ./../statediff + replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 05e643708..2455766b1 100644 --- a/go.sum +++ b/go.sum @@ -159,6 +159,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= github.com/davidlazar/go-crypto 
v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= @@ -206,6 +208,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanw/esbuild v0.6.28/go.mod h1:mptxmSXIzBIKKCe4jo9A5SToEd1G+AKZ9JmY85dYRJ0= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -218,6 +221,8 @@ github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsg github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 h1:K6t4Hrs+rwUxBz2xg88Bdqeh4k5/rycQFdPseZhRyfE= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= @@ -280,6 +285,7 @@ github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclK github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -506,6 +512,11 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync 
v0.2.1 h1:MdehhqBSuTI2LARfKLkpYnt0mUrqHs/mtuDnESXHBfU= github.com/ipfs/go-graphsync v0.2.1/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.2.0 h1:x94MvHLNuRwBlZzVal7tR1RYK7T7H6bqQLPopxDbIF0= +github.com/ipfs/go-graphsync v0.2.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.1.2 h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg= +github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA= +github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -688,6 +699,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= @@ -1015,6 +1027,8 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1317,6 +1331,8 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1369,6 +1385,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:X github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen 
v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200814224545-656e08ce49ee h1:U7zWWvvAjT76EiuWPSOiZlQDnaQYPxPoxugTtTAcJK0= @@ -1395,6 +1412,8 @@ github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7c github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345 h1:IJVAwIctqDFOrO0C2qzksXmANviyHJzrklU27e1ltzE= +github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345/go.mod h1:D7hA8H5pyQx7Y5Em7IWx1R4vNJzfon3gpG9nxjkITjQ= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829 h1:wb7xrDzfkLgPHsSEBm+VSx6aDdi64VtV0xvP0E6j8bk= @@ -1483,6 +1502,7 @@ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1495,6 +1515,8 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1508,11 +1530,14 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -1614,6 +1639,7 @@ golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1623,6 +1649,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1631,6 +1658,7 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= @@ -1674,6 +1702,7 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1802,6 +1831,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= From ebc8489ff183859cc0d567bd95b5904183b25152 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 19 Sep 2020 20:27:24 +0200 Subject: [PATCH 68/88] Add global-profile option Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 3c5bd0cda..e201baa85 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -80,6 +80,10 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-gc", }, + &cli.BoolFlag{ + Name: "global-profile", + Value: true, + }, &cli.Int64Flag{ Name: "start-at", }, @@ -161,14 +165,16 @@ var importBenchCmd = &cli.Command{ cs := store.NewChainStore(bs, ds, vm.Syscalls(verifier)) stm := stmgr.NewStateManager(cs) - prof, err := os.Create("import-bench.prof") - if err != nil { - return err - } - defer prof.Close() //nolint:errcheck + if cctx.Bool("global-profile") { + prof, err := os.Create("import-bench.prof") + if err != nil { + return err + } + defer prof.Close() //nolint:errcheck - if err := pprof.StartCPUProfile(prof); err != nil { - return err + if err := pprof.StartCPUProfile(prof); err != nil { + return err + } } var head *types.TipSet From 35cf69ae646574557acfaaa8bd65a4693560b352 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 19 Sep 2020 20:49:40 +0200 Subject: [PATCH 69/88] Disable bloomcache Signed-off-by: Jakub 
Sztandera --- cmd/lotus-bench/import.go | 3 +++ go.mod | 1 + 2 files changed, 4 insertions(+) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index e201baa85..5da4f2d3d 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -134,6 +134,9 @@ var importBenchCmd = &cli.Command{ log.Info("done calling CollectGarbage on main ds") } bs := blockstore.NewBlockstore(bds) + cacheOpts := blockstore.DefaultCacheOpts() + cacheOpts.HasBloomFilterSize = 0 + cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts()) if err != nil { return err diff --git a/go.mod b/go.mod index 83137d7c8..d2e552ffc 100644 --- a/go.mod +++ b/go.mod @@ -116,6 +116,7 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.22.4 + go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.15.0 From 242a77b391c001ddf3fad53c3f5c8160e157c4c3 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 19 Sep 2020 20:50:06 +0200 Subject: [PATCH 70/88] go mod tidy Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 2 +- go.mod | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 5da4f2d3d..428fd4212 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -137,7 +137,7 @@ var importBenchCmd = &cli.Command{ cacheOpts := blockstore.DefaultCacheOpts() cacheOpts.HasBloomFilterSize = 0 - cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, blockstore.DefaultCacheOpts()) + cbs, err := blockstore.CachedBlockstore(context.TODO(), bs, cacheOpts) if err != nil { return err } diff --git a/go.mod b/go.mod index d2e552ffc..83137d7c8 100644 --- a/go.mod +++ b/go.mod @@ -116,7 +116,6 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.22.4 - go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.15.0 From b7f18b460147f43d59203f43f5845164d9f81066 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sat, 19 Sep 2020 21:00:38 +0200 Subject: [PATCH 71/88] Disable callers Signed-off-by: Jakub Sztandera --- chain/vm/runtime.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 156d57282..eb4476718 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -5,7 +5,6 @@ import ( "context" "encoding/binary" "fmt" - gruntime "runtime" "time" "github.com/filecoin-project/go-address" @@ -493,7 +492,8 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) { func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError { toUse := gas.Total() var callers [10]uintptr - cout := gruntime.Callers(2+skip, callers[:]) + + cout := 0 //gruntime.Callers(2+skip, callers[:]) now := build.Clock.Now() if rt.lastGasCharge != nil { From 1c6214b76d1cb1022afeb8ac66e475c046ec4a4f Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Sun, 20 Sep 2020 03:22:41 +0200 Subject: [PATCH 72/88] Usage go-bitfield with buffer pool Signed-off-by: Jakub Sztandera --- go.mod | 2 +- go.sum | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 83137d7c8..afa317614 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/fatih/color v1.8.0 
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d github.com/filecoin-project/go-address v0.0.4 - github.com/filecoin-project/go-bitfield v0.2.0 + github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3 github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 github.com/filecoin-project/go-data-transfer v0.6.6 diff --git a/go.sum b/go.sum index 2455766b1..28606d17a 100644 --- a/go.sum +++ b/go.sum @@ -225,6 +225,10 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.1-0.20200920171219-7c2059195a8c h1:eEmdVMWo7AngX9fGZSSAm/V6+7tqiBawFfHRjW35JwU= +github.com/filecoin-project/go-bitfield v0.2.1-0.20200920171219-7c2059195a8c/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3 h1:HQa4+yCYsLq1TLM0kopeAhSCLbtZ541cWEi5N5rO+9g= +github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= From 0771c23fb02d2c6cb456f81c297cdb441345e7a6 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Mon, 21 Sep 2020 22:47:03 +0200 Subject: [PATCH 73/88] Use pebble Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 26 ++++++++++++++++++++++++-- go.mod | 4 +++- go.sum | 23 +++++++++++++++++++++-- 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 428fd4212..b535ed96c 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -16,6 +16,8 @@ import ( "sort" "time" + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/bloom" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -33,6 +35,7 @@ import ( bdg "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" + pebbleds "github.com/ipfs/go-ds-pebble" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -123,14 +126,33 @@ var importBenchCmd = &cli.Command{ bdgOpt.Options.Truncate = true bdgOpt.Options.DetectConflicts = false - bds, err := badger.NewDatastore(tdir, &bdgOpt) + cache := 512 + bds, err := pebbleds.NewDatastore(tdir, &pebble.Options{ + // Pebble has a single combined cache area and the write + // buffers are taken from this too. Assign all available + // memory allowance for cache. + Cache: pebble.NewCache(int64(cache * 1024 * 1024)), + // The size of memory table(as well as the write buffer). + // Note, there may have more than two memory tables in the system. + // MemTableStopWritesThreshold can be configured to avoid the memory abuse. 
+ MemTableSize: cache * 1024 * 1024 / 4, + // The default compaction concurrency(1 thread), + // Here use all available CPUs for faster compaction. + MaxConcurrentCompactions: runtime.NumCPU(), + // Per-level options. Options for at least one level must be specified. The + // options for the last level are used for all subsequent levels. + Levels: []pebble.LevelOptions{ + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + }, + Logger: log, + }) if err != nil { return err } if cctx.Bool("only-gc") { log.Info("calling CollectGarbage on main ds") - bds.CollectGarbage() + //bds.CollectGarbage() log.Info("done calling CollectGarbage on main ds") } bs := blockstore.NewBlockstore(bds) diff --git a/go.mod b/go.mod index afa317614..3b3b4fed7 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 + github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b github.com/coreos/go-systemd/v22 v22.0.0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.2 @@ -53,10 +54,11 @@ require ( github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 github.com/ipfs/go-cid v0.0.7 github.com/ipfs/go-cidutil v0.0.2 - github.com/ipfs/go-datastore v0.4.4 + github.com/ipfs/go-datastore v0.4.5 github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e github.com/ipfs/go-ds-leveldb v0.4.2 github.com/ipfs/go-ds-measure v0.1.0 + github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 github.com/ipfs/go-graphsync v0.2.1 diff --git a/go.sum b/go.sum index 28606d17a..19e33d294 100644 --- a/go.sum +++ b/go.sum @@ -112,6 +112,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA= +github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= @@ -125,6 +127,14 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags 
v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b h1:OKALTB609+19AM7wsO0k8yMwAqjEIppcnYvyIhA+ZlQ= +github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= +github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -225,8 +235,6 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-bitfield v0.2.1-0.20200920171219-7c2059195a8c h1:eEmdVMWo7AngX9fGZSSAm/V6+7tqiBawFfHRjW35JwU= -github.com/filecoin-project/go-bitfield v0.2.1-0.20200920171219-7c2059195a8c/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3 h1:HQa4+yCYsLq1TLM0kopeAhSCLbtZ541cWEi5N5rO+9g= github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= @@ -283,6 +291,9 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -351,6 +362,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -491,6 +504,8 @@ github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13X github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -509,6 +524,8 @@ github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9 github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61 h1:2wNNdpETSZgnsgy7wx7O6ueu+LCSZRedWrAsIPiOeFE= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= @@ -1521,6 +1538,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROE golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= From 55c6b88537dd9f7b5db9aa601127634b9d13c96b Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Tue, 22 Sep 2020 00:46:31 +0200 Subject: [PATCH 74/88] Add toggle for badger, flag out gas tracing Signed-off-by: Jakub Sztandera --- chain/vm/runtime.go | 56 ++++++++++++++++++++----------------- chain/vm/vm.go | 19 +++++++++---- cmd/lotus-bench/import.go | 58 +++++++++++++++++++++------------------ 3 files changed, 75 insertions(+), 58 deletions(-) diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index eb4476718..72dd413ed 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -459,8 +459,10 @@ func (rt *Runtime) 
stateCommit(oldh, newh cid.Cid) aerrors.ActorError { } func (rt *Runtime) finilizeGasTracing() { - if rt.lastGasCharge != nil { - rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime) + if enableTracing { + if rt.lastGasCharge != nil { + rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime) + } } } @@ -489,35 +491,39 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) { } +var enableTracing = false + func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError { toUse := gas.Total() - var callers [10]uintptr + if enableTracing { + var callers [10]uintptr - cout := 0 //gruntime.Callers(2+skip, callers[:]) + cout := 0 //gruntime.Callers(2+skip, callers[:]) - now := build.Clock.Now() - if rt.lastGasCharge != nil { - rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime) + now := build.Clock.Now() + if rt.lastGasCharge != nil { + rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime) + } + + gasTrace := types.GasTrace{ + Name: gas.Name, + Extra: gas.Extra, + + TotalGas: toUse, + ComputeGas: gas.ComputeGas, + StorageGas: gas.StorageGas, + + TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, + VirtualComputeGas: gas.VirtualCompute, + VirtualStorageGas: gas.VirtualStorage, + + Callers: callers[:cout], + } + rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) + rt.lastGasChargeTime = now + rt.lastGasCharge = &gasTrace } - gasTrace := types.GasTrace{ - Name: gas.Name, - Extra: gas.Extra, - - TotalGas: toUse, - ComputeGas: gas.ComputeGas, - StorageGas: gas.StorageGas, - - TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, - VirtualComputeGas: gas.VirtualCompute, - VirtualStorageGas: gas.VirtualStorage, - - Callers: callers[:cout], - } - rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) - rt.lastGasChargeTime = now - rt.lastGasCharge = &gasTrace - // overflow safe if rt.gasUsed > rt.gasAvailable-toUse { rt.gasUsed = rt.gasAvailable diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 54ea47698..c566ec1eb 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -227,14 +227,21 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, } rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac) - rt.lastGasChargeTime = start + if enableTracing { + rt.lastGasChargeTime = start + if parent != nil { + rt.lastGasChargeTime = parent.lastGasChargeTime + rt.lastGasCharge = parent.lastGasCharge + defer func() { + parent.lastGasChargeTime = rt.lastGasChargeTime + parent.lastGasCharge = rt.lastGasCharge + }() + } + } + if parent != nil { - rt.lastGasChargeTime = parent.lastGasChargeTime - rt.lastGasCharge = parent.lastGasCharge defer func() { - parent.gasUsed = rt.gasUsed - parent.lastGasChargeTime = rt.lastGasChargeTime - parent.lastGasCharge = rt.lastGasCharge + parent.gasUsed += rt.gasUsed }() } if gasCharge != nil { diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index b535ed96c..c3554f939 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -119,33 +119,37 @@ var importBenchCmd = &cli.Command{ tdir = tmp } - bdgOpt := badger.DefaultOptions - bdgOpt.GcInterval = 0 - bdgOpt.Options = bdg.DefaultOptions("") - bdgOpt.Options.SyncWrites = false - bdgOpt.Options.Truncate = true - bdgOpt.Options.DetectConflicts = false - - cache := 512 - bds, err := pebbleds.NewDatastore(tdir, &pebble.Options{ - // Pebble has a single combined cache area and the write - // buffers are taken 
from this too. Assign all available - // memory allowance for cache. - Cache: pebble.NewCache(int64(cache * 1024 * 1024)), - // The size of memory table(as well as the write buffer). - // Note, there may have more than two memory tables in the system. - // MemTableStopWritesThreshold can be configured to avoid the memory abuse. - MemTableSize: cache * 1024 * 1024 / 4, - // The default compaction concurrency(1 thread), - // Here use all available CPUs for faster compaction. - MaxConcurrentCompactions: runtime.NumCPU(), - // Per-level options. Options for at least one level must be specified. The - // options for the last level are used for all subsequent levels. - Levels: []pebble.LevelOptions{ - {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, - }, - Logger: log, - }) + var bds datastore.Batching + if false { + cache := 512 + bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ + // Pebble has a single combined cache area and the write + // buffers are taken from this too. Assign all available + // memory allowance for cache. + Cache: pebble.NewCache(int64(cache * 1024 * 1024)), + // The size of memory table(as well as the write buffer). + // Note, there may have more than two memory tables in the system. + // MemTableStopWritesThreshold can be configured to avoid the memory abuse. + MemTableSize: cache * 1024 * 1024 / 4, + // The default compaction concurrency(1 thread), + // Here use all available CPUs for faster compaction. + MaxConcurrentCompactions: runtime.NumCPU(), + // Per-level options. Options for at least one level must be specified. The + // options for the last level are used for all subsequent levels. + Levels: []pebble.LevelOptions{ + {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + }, + Logger: log, + }) + } else { + bdgOpt := badger.DefaultOptions + bdgOpt.GcInterval = 0 + bdgOpt.Options = bdg.DefaultOptions("") + bdgOpt.Options.SyncWrites = false + bdgOpt.Options.Truncate = true + bdgOpt.Options.DetectConflicts = false + bds, err = badger.NewDatastore(tdir, &bdgOpt) + } if err != nil { return err } From ff8c0af8c82c022b86fd7f1043460711c31a6c43 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Tue, 22 Sep 2020 00:54:11 +0200 Subject: [PATCH 75/88] Add only-import option Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c3554f939..6af1bfc02 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -90,6 +90,9 @@ var importBenchCmd = &cli.Command{ &cli.Int64Flag{ Name: "start-at", }, + &cli.BoolFlag{ + Name: "only-import", + }, }, Action: func(cctx *cli.Context) error { vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads") @@ -119,6 +122,13 @@ var importBenchCmd = &cli.Command{ tdir = tmp } + bdgOpt := badger.DefaultOptions + bdgOpt.GcInterval = 0 + bdgOpt.Options = bdg.DefaultOptions("") + bdgOpt.Options.SyncWrites = false + bdgOpt.Options.Truncate = true + bdgOpt.Options.DetectConflicts = false + var bds datastore.Batching if false { cache := 512 @@ -142,17 +152,12 @@ var importBenchCmd = &cli.Command{ Logger: log, }) } else { - bdgOpt := badger.DefaultOptions - bdgOpt.GcInterval = 0 - bdgOpt.Options = bdg.DefaultOptions("") - bdgOpt.Options.SyncWrites = false - bdgOpt.Options.Truncate = true - bdgOpt.Options.DetectConflicts = false bds, err = badger.NewDatastore(tdir, &bdgOpt) } if err != nil { return err } + defer bds.Close() 
if cctx.Bool("only-gc") { log.Info("calling CollectGarbage on main ds") @@ -176,6 +181,7 @@ var importBenchCmd = &cli.Command{ if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } + defer scds.Close() if cctx.Bool("only-gc") { log.Info("calling CollectGarbage on syscall ds") @@ -223,6 +229,10 @@ var importBenchCmd = &cli.Command{ } } + if cctx.Bool("only-import") { + return nil + } + gb, err := cs.GetTipsetByHeight(context.TODO(), 0, head, true) if err != nil { return err From 76db65b1afcfa3f752ff51afcdc9021bacd38415 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Tue, 22 Sep 2020 00:57:37 +0200 Subject: [PATCH 76/88] Update pebble Signed-off-by: Jakub Sztandera --- chain/vm/vm.go | 2 +- cmd/lotus-bench/import.go | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/chain/vm/vm.go b/chain/vm/vm.go index c566ec1eb..44979454f 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -241,7 +241,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, if parent != nil { defer func() { - parent.gasUsed += rt.gasUsed + parent.gasUsed = rt.gasUsed }() } if gasCharge != nil { diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 6af1bfc02..94dae0d98 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -130,7 +130,7 @@ var importBenchCmd = &cli.Command{ bdgOpt.Options.DetectConflicts = false var bds datastore.Batching - if false { + if true { cache := 512 bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ // Pebble has a single combined cache area and the write @@ -147,7 +147,7 @@ var importBenchCmd = &cli.Command{ // Per-level options. Options for at least one level must be specified. The // options for the last level are used for all subsequent levels. 
Levels: []pebble.LevelOptions{ - {TargetFileSize: 2 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10)}, + {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression}, }, Logger: log, }) diff --git a/go.mod b/go.mod index 3b3b4fed7..17cb2fd36 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,7 @@ require ( github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e github.com/ipfs/go-ds-leveldb v0.4.2 github.com/ipfs/go-ds-measure v0.1.0 - github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61 + github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 github.com/ipfs/go-graphsync v0.2.1 diff --git a/go.sum b/go.sum index 19e33d294..6929c9a23 100644 --- a/go.sum +++ b/go.sum @@ -524,8 +524,8 @@ github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9 github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61 h1:2wNNdpETSZgnsgy7wx7O6ueu+LCSZRedWrAsIPiOeFE= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921211847-f1ffb3128b61/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 h1:W3YMLEvOXqdW+sYMiguhWP6txJwQvIQqhvpU8yAMGQs= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= From 0d914ac1d4c4d0e8955c7074e9dd03eb49f44900 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Tue, 22 Sep 2020 22:01:41 +0200 Subject: [PATCH 77/88] Switch to badger Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 94dae0d98..c8328684e 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -130,7 +130,7 @@ var importBenchCmd = &cli.Command{ bdgOpt.Options.DetectConflicts = false var bds datastore.Batching - if true { + if false { cache := 512 bds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ // Pebble has a single combined cache area and the write From 7e8c6e507055f1f75607facbc0d0220608540343 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Wed, 23 Sep 2020 18:44:41 +0200 Subject: [PATCH 78/88] Remove statediff, fix lint, go mod tidy Signed-off-by: Jakub Sztandera --- cmd/lotus-bench/import.go | 20 +++----------------- go.mod | 7 ++++--- go.sum | 23 ----------------------- 3 files changed, 7 insertions(+), 43 deletions(-) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c8328684e..3d93b0e5e 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -30,7 +30,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "github.com/filecoin-project/statediff" bdg "github.com/dgraph-io/badger/v2" "github.com/ipfs/go-datastore" @@ -80,9 +79,6 @@ var importBenchCmd = &cli.Command{ Name: "no-import", Usage: "should we import the 
chain? if set to true chain has to be previously imported", }, - &cli.BoolFlag{ - Name: "only-gc", - }, &cli.BoolFlag{ Name: "global-profile", Value: true, @@ -157,13 +153,8 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } - defer bds.Close() + defer bds.Close() //nolint:errcheck - if cctx.Bool("only-gc") { - log.Info("calling CollectGarbage on main ds") - //bds.CollectGarbage() - log.Info("done calling CollectGarbage on main ds") - } bs := blockstore.NewBlockstore(bds) cacheOpts := blockstore.DefaultCacheOpts() cacheOpts.HasBloomFilterSize = 0 @@ -181,13 +172,8 @@ var importBenchCmd = &cli.Command{ if err != nil { return xerrors.Errorf("opening syscall-cache datastore: %w", err) } - defer scds.Close() + defer scds.Close() //nolint:errcheck - if cctx.Bool("only-gc") { - log.Info("calling CollectGarbage on syscall ds") - scds.CollectGarbage() - log.Info("done calling CollectGarbage on syscall ds") - } verifier = &cachingVerifier{ ds: scds, backend: verifier, @@ -317,7 +303,7 @@ var importBenchCmd = &cli.Command{ } fmt.Println("TRACE") fmt.Println(string(d)) - fmt.Println(statediff.Diff(context.Background(), bs, tschain[i-1].ParentState(), st, statediff.ExpandActors)) + //fmt.Println(statediff.Diff(context.Background(), bs, tschain[i-1].ParentState(), st, statediff.ExpandActors)) return xerrors.Errorf("tipset chain had state mismatch at height %d (%s != %s)", cur.Height(), cur.ParentState(), st) } } diff --git a/go.mod b/go.mod index 17cb2fd36..b8896d78f 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/BurntSushi/toml v0.3.1 github.com/GeertJohan/go.rice v1.0.0 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee + github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b @@ -38,10 +39,10 @@ require ( github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b github.com/filecoin-project/specs-actors v0.9.11 github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796 - github.com/filecoin-project/statediff v0.0.6-0.20200918150628-da86dd0d264c github.com/filecoin-project/test-vectors/schema v0.0.1 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 github.com/go-kit/kit v0.10.0 + github.com/go-ole/go-ole v1.2.4 // indirect github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.2 @@ -118,6 +119,7 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.22.4 + go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.15.0 @@ -127,12 +129,11 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible + launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect ) replace github.com/filecoin-project/lotus => ./ -replace github.com/filecoin-project/statediff => ./../statediff - replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0 replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 6929c9a23..cbceb52c6 100644 --- a/go.sum +++ b/go.sum @@ -169,8 +169,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs 
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= -github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= @@ -218,7 +216,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanw/esbuild v0.6.28/go.mod h1:mptxmSXIzBIKKCe4jo9A5SToEd1G+AKZ9JmY85dYRJ0= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -231,8 +228,6 @@ github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsg github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= -github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161 h1:K6t4Hrs+rwUxBz2xg88Bdqeh4k5/rycQFdPseZhRyfE= -github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20200731171407-e559a0579161/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.1-0.20200920172649-837cbe6a1ed3 h1:HQa4+yCYsLq1TLM0kopeAhSCLbtZ541cWEi5N5rO+9g= @@ -720,7 +715,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= @@ -1048,8 +1042,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1352,8 +1344,6 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1406,7 +1396,6 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:X github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200814224545-656e08ce49ee h1:U7zWWvvAjT76EiuWPSOiZlQDnaQYPxPoxugTtTAcJK0= @@ -1433,8 +1422,6 @@ github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7c github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= -github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345 h1:IJVAwIctqDFOrO0C2qzksXmANviyHJzrklU27e1ltzE= -github.com/willscott/go-cmp v0.5.2-0.20200812183318-8affb9542345/go.mod h1:D7hA8H5pyQx7Y5Em7IWx1R4vNJzfon3gpG9nxjkITjQ= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/c-for-go v0.0.0-20200718154222-87b0065af829 h1:wb7xrDzfkLgPHsSEBm+VSx6aDdi64VtV0xvP0E6j8bk= @@ -1523,7 +1510,6 @@ golang.org/x/crypto 
v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1536,8 +1522,6 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -1553,8 +1537,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1672,7 +1654,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1681,7 +1662,6 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= @@ -1725,7 +1705,6 @@ golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1854,8 +1833,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= From 247a5e2c496eaae153cab85d88335db922e36ed0 Mon Sep 17 00:00:00 2001 From: Jakub Sztandera Date: Thu, 24 Sep 2020 17:49:58 +0200 Subject: [PATCH 79/88] Go mod tidy Signed-off-by: Jakub Sztandera --- go.sum | 4 ---- 1 file changed, 4 deletions(-) diff --git a/go.sum b/go.sum index cbceb52c6..0f309ce1a 100644 --- a/go.sum +++ b/go.sum @@ -528,10 +528,6 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.2.1 h1:MdehhqBSuTI2LARfKLkpYnt0mUrqHs/mtuDnESXHBfU= github.com/ipfs/go-graphsync v0.2.1/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= -github.com/ipfs/go-graphsync v0.2.0 h1:x94MvHLNuRwBlZzVal7tR1RYK7T7H6bqQLPopxDbIF0= -github.com/ipfs/go-graphsync v0.2.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= -github.com/ipfs/go-graphsync v0.1.2 
h1:25Ll9kIXCE+DY0dicvfS3KMw+U5sd01b/FJbA7KAbhg= -github.com/ipfs/go-graphsync v0.1.2/go.mod h1:sLXVXm1OxtE2XYPw62MuXCdAuNwkAdsbnfrmos5odbA= github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= From baef3c8dd26318fb435c894bfd030e7daf68834a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Magiera?= Date: Tue, 29 Sep 2020 15:22:46 +0200 Subject: [PATCH 80/88] sectorstorage: Fix potential panic in FinalizeSector --- .../sector-storage/ffiwrapper/sealer_cgo.go | 47 ++++++++++--------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 9bc2680ed..d75501838 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -546,34 +546,37 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepU defer done() pf, err := openPartialFile(maxPieceSize, paths.Unsealed) - if xerrors.Is(err, os.ErrNotExist) { - return xerrors.Errorf("opening partial file: %w", err) - } + if err == nil { + var at uint64 + for sr.HasNext() { + r, err := sr.NextRun() + if err != nil { + _ = pf.Close() + return err + } - var at uint64 - for sr.HasNext() { - r, err := sr.NextRun() - if err != nil { - _ = pf.Close() + offset := at + at += r.Len + if !r.Val { + continue + } + + err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded()) + if err != nil { + _ = pf.Close() + return xerrors.Errorf("free partial file range: %w", err) + } + } + + if err := pf.Close(); err != nil { return err } - - offset := at - at += r.Len - if !r.Val { - continue - } - - err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded()) - if err != nil { - _ = pf.Close() - return xerrors.Errorf("free partial file range: %w", err) + } else { + if !xerrors.Is(err, os.ErrNotExist) { + return xerrors.Errorf("opening partial file: %w", err) } } - if err := pf.Close(); err != nil { - return err - } } paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage) From 9fe32b7777af8037e1572ccb84feb639970b049c Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Wed, 30 Sep 2020 01:10:03 -0400 Subject: [PATCH 81/88] Fix wallet list --- cli/wallet.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cli/wallet.go b/cli/wallet.go index 0d69673f9..aa5b9bed3 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -89,10 +89,8 @@ var walletList = &cli.Command{ return err } - def, err := api.WalletDefaultAddress(ctx) - if err != nil { - return err - } + // Assume an error means no default key is set + def, _ := api.WalletDefaultAddress(ctx) tw := tablewriter.New( tablewriter.Col("Address"), From a4e71174297138fdb4d25240fafe4b90abfc656d Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sun, 27 Sep 2020 17:52:26 -0400 Subject: [PATCH 82/88] Add lotus shed util to validate a tipset --- api/api_full.go | 3 ++ api/apistruct/struct.go | 5 ++++ chain/store/store.go | 10 +++++++ cmd/lotus-shed/main.go | 1 + cmd/lotus-shed/sync.go | 64 +++++++++++++++++++++++++++++++++++++++++ node/impl/full/sync.go | 26 +++++++++++++++++ 6 files changed, 109 insertions(+) create mode 100644 
cmd/lotus-shed/sync.go diff --git a/api/api_full.go b/api/api_full.go index 6d2d0c7b5..0e5622f4c 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -176,6 +176,9 @@ type FullNode interface { // the reason. SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) + // SyncValidateTipset indicates whether the provided tipset is valid or not + SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) + // MethodGroup: Mpool // The Mpool methods are for interacting with the message pool. The message pool // manages all incoming and outgoing 'messages' going over the network. diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index d5b6950ad..cc2b8b5b5 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -112,6 +112,7 @@ type FullNodeStruct struct { SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` SyncUnmarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"` + SyncValidateTipset func(ctx context.Context, tsk types.TipSetKey) (bool, error) `perm:"read"` MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"` @@ -735,6 +736,10 @@ func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string return c.Internal.SyncCheckBad(ctx, bcid) } +func (c *FullNodeStruct) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { + return c.Internal.SyncValidateTipset(ctx, tsk) +} + func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) { return c.Internal.StateNetworkName(ctx) } diff --git a/chain/store/store.go b/chain/store/store.go index 6c93db7a0..0806fb921 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -286,6 +286,16 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e return nil } +func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { + key := blockValidationCacheKeyPrefix.Instance(blkid.String()) + + if err := cs.ds.Delete(key); err != nil { + return xerrors.Errorf("removing from valid block cache: %w", err) + } + + return nil +} + func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { ts, err := types.NewTipSet([]*types.BlockHeader{b}) if err != nil { diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index c7ded7a25..3864d3014 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -38,6 +38,7 @@ func main() { exportChainCmd, consensusCmd, serveDealStatsCmd, + syncCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/sync.go b/cmd/lotus-shed/sync.go new file mode 100644 index 000000000..bfe7cc8b7 --- /dev/null +++ b/cmd/lotus-shed/sync.go @@ -0,0 +1,64 @@ +package main + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" +) + +var syncCmd = &cli.Command{ + Name: "sync", + Usage: "tools for diagnosing sync issues", + Flags: []cli.Flag{}, + Subcommands: []*cli.Command{ + syncValidateCmd, + }, +} + +var syncValidateCmd = &cli.Command{ + Name: "validate", + Usage: "checks whether a provided tipset is valid", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Args().Len() < 1 { + 
fmt.Println("usage: ...") + fmt.Println("At least one block cid must be provided") + return nil + } + + args := cctx.Args().Slice() + + var tscids []cid.Cid + for _, s := range args { + c, err := cid.Decode(s) + if err != nil { + return fmt.Errorf("block cid was invalid: %s", err) + } + tscids = append(tscids, c) + } + + tsk := types.NewTipSetKey(tscids...) + + valid, err := api.SyncValidateTipset(ctx, tsk) + if err != nil { + fmt.Println("Tipset is invalid: ", err) + } + + if valid { + fmt.Println("Tipset is valid") + } + + return nil + }, +} diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index dc3bfe230..221942673 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -126,3 +126,29 @@ func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error return reason, nil } + +func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { + ts, err := a.Syncer.ChainStore().LoadTipSet(tsk) + if err != nil { + return false, err + } + + fts, err := a.Syncer.ChainStore().TryFillTipSet(ts) + if err != nil { + return false, err + } + + for _, blk := range tsk.Cids() { + err = a.Syncer.ChainStore().UnmarkBlockAsValidated(ctx, blk) + if err != nil { + return false, err + } + } + + err = a.Syncer.ValidateTipSet(ctx, fts) + if err != nil { + return false, err + } + + return true, nil +} From 73d193bd9cd1b4d5e9355058febed293087a9364 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Sun, 27 Sep 2020 18:08:58 -0400 Subject: [PATCH 83/88] Update docs --- documentation/en/api-methods.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index ed082ccbf..29271bdd5 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -169,6 +169,7 @@ * [SyncState](#SyncState) * [SyncSubmitBlock](#SyncSubmitBlock) * [SyncUnmarkBad](#SyncUnmarkBad) + * [SyncValidateTipset](#SyncValidateTipset) * [Wallet](#Wallet) * [WalletBalance](#WalletBalance) * [WalletDefaultAddress](#WalletDefaultAddress) @@ -4379,6 +4380,28 @@ Inputs: Response: `{}` +### SyncValidateTipset +SyncValidateTipset indicates whether the provided tipset is valid or not + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `true` + ## Wallet From c45c8f34a16605a087bd4c6b5b022f8828784156 Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Wed, 30 Sep 2020 01:39:06 -0400 Subject: [PATCH 84/88] Parametrise whether sync validators should use cache --- chain/sync.go | 28 ++++++++++++++++------------ chain/sync_test.go | 4 ++-- node/impl/full/sync.go | 9 +-------- 3 files changed, 19 insertions(+), 22 deletions(-) diff --git a/chain/sync.go b/chain/sync.go index 78e5178d1..b2e3bb7f1 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -597,7 +597,7 @@ func isPermanent(err error) bool { return !errors.Is(err, ErrTemporal) } -func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) error { +func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error { ctx, span := trace.StartSpan(ctx, "validateTipSet") defer span.End() @@ -613,7 +613,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) b := b // rebind to a scoped variable futures = append(futures, async.Err(func() error { - if err := 
syncer.ValidateBlock(ctx, b); err != nil { + if err := syncer.ValidateBlock(ctx, b, useCache); err != nil { if isPermanent(err) { syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error())) } @@ -680,7 +680,7 @@ func blockSanityChecks(h *types.BlockHeader) error { } // ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec -func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) { +func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) { defer func() { // b.Cid() could panic for empty blocks that are used in tests. if rerr := recover(); rerr != nil { @@ -689,13 +689,15 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er } }() - isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid()) - if err != nil { - return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err) - } + if useCache { + isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid()) + if err != nil { + return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err) + } - if isValidated { - return nil + if isValidated { + return nil + } } validationStart := build.Clock.Now() @@ -959,8 +961,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er return mulErr } - if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil { - return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err) + if useCache { + if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil { + return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err) + } } return nil @@ -1462,7 +1466,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []* return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error { log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids())) - if err := syncer.ValidateTipSet(ctx, fts); err != nil { + if err := syncer.ValidateTipSet(ctx, fts, true); err != nil { log.Errorf("failed to validate tipset: %+v", err) return xerrors.Errorf("message processing failed: %w", err) } diff --git a/chain/sync_test.go b/chain/sync_test.go index 7a839be2b..1b06f604b 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -732,7 +732,7 @@ func TestSyncInputs(t *testing.T) { err := s.ValidateBlock(context.TODO(), &types.FullBlock{ Header: &types.BlockHeader{}, - }) + }, false) if err == nil { t.Fatal("should error on empty block") } @@ -741,7 +741,7 @@ func TestSyncInputs(t *testing.T) { h.ElectionProof = nil - err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}) + err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}, false) if err == nil { t.Fatal("should error on block with nil election proof") } diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 221942673..1bd3af415 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -138,14 +138,7 @@ func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) ( return false, err } - for _, blk := range tsk.Cids() { - err = a.Syncer.ChainStore().UnmarkBlockAsValidated(ctx, blk) - if err != nil { - return false, err - } - } - - err = a.Syncer.ValidateTipSet(ctx, fts) + err = a.Syncer.ValidateTipSet(ctx, fts, false) if err != nil { return false, err } From a388bcfad61d90dc5718982095c1707f6d858cdf Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Wed, 30 Sep 2020 
01:43:10 -0400 Subject: [PATCH 85/88] Add an endpoint to validate whether a string is a well-formed address --- api/api_full.go | 2 ++ api/apistruct/struct.go | 29 +++++++++++++++++------------ node/impl/full/wallet.go | 4 ++++ 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/api/api_full.go b/api/api_full.go index 6d2d0c7b5..7b7574c26 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -244,6 +244,8 @@ type FullNode interface { WalletImport(context.Context, *types.KeyInfo) (address.Address, error) // WalletDelete deletes an address from the wallet. WalletDelete(context.Context, address.Address) error + // WalletValidateAddress validates whether a given string can be decoded as a well-formed address + WalletValidateAddress(context.Context, string) (address.Address, error) // Other diff --git a/api/apistruct/struct.go b/api/apistruct/struct.go index d5b6950ad..73c4d8ed5 100644 --- a/api/apistruct/struct.go +++ b/api/apistruct/struct.go @@ -129,18 +129,19 @@ type FullNodeStruct struct { MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` - WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` - WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` - WalletList func(context.Context) ([]address.Address, error) `perm:"write"` - WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"` - WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"` - WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"` - WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"` - WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"` - WalletSetDefault func(context.Context, address.Address) error `perm:"admin"` - WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` - WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` - WalletDelete func(context.Context, address.Address) error `perm:"write"` + WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` + WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` + WalletList func(context.Context) ([]address.Address, error) `perm:"write"` + WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"` + WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"` + WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"` + WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) `perm:"read"` + WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"` + WalletSetDefault func(context.Context, address.Address) error `perm:"admin"` + WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` + WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` + WalletDelete func(context.Context, address.Address) error `perm:"write"` + WalletValidateAddress func(context.Context, string) (address.Address, error) `perm:"read"` ClientImport func(ctx 
context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"` ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` @@ -631,6 +632,10 @@ func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address) return c.Internal.WalletDelete(ctx, addr) } +func (c *FullNodeStruct) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) { + return c.Internal.WalletValidateAddress(ctx, str) +} + func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { return c.Internal.MpoolGetNonce(ctx, addr) } diff --git a/node/impl/full/wallet.go b/node/impl/full/wallet.go index 64231b74e..b2ecdebbd 100644 --- a/node/impl/full/wallet.go +++ b/node/impl/full/wallet.go @@ -90,3 +90,7 @@ func (a *WalletAPI) WalletImport(ctx context.Context, ki *types.KeyInfo) (addres func (a *WalletAPI) WalletDelete(ctx context.Context, addr address.Address) error { return a.Wallet.DeleteKey(addr) } + +func (a *WalletAPI) WalletValidateAddress(ctx context.Context, str string) (address.Address, error) { + return address.NewFromString(str) +} From bc4cbdc8957e45daca3b24ed5f434ebf72ced58b Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Wed, 30 Sep 2020 01:45:03 -0400 Subject: [PATCH 86/88] Update docs --- documentation/en/api-methods.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/documentation/en/api-methods.md b/documentation/en/api-methods.md index ed082ccbf..beaf7f3b6 100644 --- a/documentation/en/api-methods.md +++ b/documentation/en/api-methods.md @@ -181,6 +181,7 @@ * [WalletSetDefault](#WalletSetDefault) * [WalletSign](#WalletSign) * [WalletSignMessage](#WalletSignMessage) + * [WalletValidateAddress](#WalletValidateAddress) * [WalletVerify](#WalletVerify) ## @@ -4585,6 +4586,21 @@ Response: } ``` +### WalletValidateAddress +WalletValidateAddress validates whether a given string can be decoded as a well-formed address + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: `"t01234"` + ### WalletVerify WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. The address does not have to be in the wallet. 
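
A quick way to sanity-check the endpoint added in the two patches above is to hit it over plain JSON-RPC. The sketch below is editorial, not part of the patch series: the method name `Filecoin.WalletValidateAddress` and its `read` permission come from the diffs above, while the `127.0.0.1:1234/rpc/v0` endpoint and the `LOTUS_API_TOKEN` environment variable are assumptions for a default local full node and a placeholder token variable — adjust both for your setup.

```go
// Minimal sketch: call the new WalletValidateAddress endpoint over raw
// JSON-RPC using only the standard library. Endpoint address and the
// LOTUS_API_TOKEN env var are assumptions for a default local full node.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	// Request body for Filecoin.WalletValidateAddress("t01234").
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"Filecoin.WalletValidateAddress","params":["t01234"]}`)

	req, err := http.NewRequest("POST", "http://127.0.0.1:1234/rpc/v0", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// A token with read permission is enough; the endpoint is declared perm:"read".
	req.Header.Set("Authorization", "Bearer "+os.Getenv("LOTUS_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// On success the result echoes the parsed address, e.g.
	// {"jsonrpc":"2.0","result":"t01234","id":1}; a malformed input
	// string comes back as a JSON-RPC error instead.
	fmt.Println(string(out))
}
```

The same request shape should work for the `Filecoin.SyncValidateTipset` method introduced earlier in the series, swapping the method name and passing the tipset's block CIDs as the single array parameter, as shown in the api-methods.md example for that endpoint.
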
From 1affd498c172ac7b29192b09f67be76b83fabe2e Mon Sep 17 00:00:00 2001 From: Dan Shao Date: Wed, 30 Sep 2020 14:23:35 +0800 Subject: [PATCH 87/88] Add --no-swap flag for worker --- cmd/lotus-seal-worker/main.go | 7 +++++++ extern/sector-storage/localworker.go | 10 +++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/cmd/lotus-seal-worker/main.go b/cmd/lotus-seal-worker/main.go index e36514bb8..d2c57e680 100644 --- a/cmd/lotus-seal-worker/main.go +++ b/cmd/lotus-seal-worker/main.go @@ -109,6 +109,11 @@ var runCmd = &cli.Command{ Name: "no-local-storage", Usage: "don't use storageminer repo for sector storage", }, + &cli.BoolFlag{ + Name: "no-swap", + Usage: "don't use swap", + Value: false, + }, &cli.BoolFlag{ Name: "addpiece", Usage: "enable addpiece", @@ -346,6 +351,7 @@ var runCmd = &cli.Command{ LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{ SealProof: spt, TaskTypes: taskTypes, + NoSwap: cctx.Bool("no-swap"), }, remote, localStore, nodeApi), localStore: localStore, ls: lr, @@ -465,6 +471,7 @@ func watchMinerConn(ctx context.Context, cctx *cli.Context, nodeApi api.StorageM "run", fmt.Sprintf("--listen=%s", cctx.String("listen")), fmt.Sprintf("--no-local-storage=%t", cctx.Bool("no-local-storage")), + fmt.Sprintf("--no-swap=%t", cctx.Bool("no-swap")), fmt.Sprintf("--addpiece=%t", cctx.Bool("addpiece")), fmt.Sprintf("--precommit1=%t", cctx.Bool("precommit1")), fmt.Sprintf("--unseal=%t", cctx.Bool("unseal")), diff --git a/extern/sector-storage/localworker.go b/extern/sector-storage/localworker.go index 2c3c350f7..b1193a2e2 100644 --- a/extern/sector-storage/localworker.go +++ b/extern/sector-storage/localworker.go @@ -26,6 +26,7 @@ var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stor type WorkerConfig struct { SealProof abi.RegisteredSealProof TaskTypes []sealtasks.TaskType + NoSwap bool } type LocalWorker struct { @@ -33,6 +34,7 @@ type LocalWorker struct { storage stores.Store localStore *stores.Local sindex stores.SectorIndex + noSwap bool acceptTasks map[sealtasks.TaskType]struct{} } @@ -50,6 +52,7 @@ func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, storage: store, localStore: local, sindex: sindex, + noSwap: wcfg.NoSwap, acceptTasks: acceptTasks, } @@ -275,11 +278,16 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } + memSwap := mem.VirtualTotal + if l.noSwap { + memSwap = 0 + } + return storiface.WorkerInfo{ Hostname: hostname, Resources: storiface.WorkerResources{ MemPhysical: mem.Total, - MemSwap: mem.VirtualTotal, + MemSwap: memSwap, MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process CPUs: uint64(runtime.NumCPU()), GPUs: gpus, From 6abccc4d5ed9c977916a812b3aa3e4407fd124ae Mon Sep 17 00:00:00 2001 From: Aayush Rajasekaran Date: Wed, 30 Sep 2020 02:56:38 -0400 Subject: [PATCH 88/88] Add an option to set config --- cmd/lotus/daemon.go | 8 ++++++++ node/repo/fsrepo.go | 34 ++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index b976fde79..a0f754a60 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -127,6 +127,10 @@ var DaemonCmd = &cli.Command{ Usage: "manage open file limit", Value: true, }, + &cli.StringFlag{ + Name: "config", + Usage: "specify path of config file to use", + }, }, Action: func(cctx *cli.Context) error { err := 
runmetrics.Enable(runmetrics.RunMetricOptions{ @@ -180,6 +184,10 @@ var DaemonCmd = &cli.Command{ return xerrors.Errorf("opening fs repo: %w", err) } + if cctx.String("config") != "" { + r.SetConfigPath(cctx.String("config")) + } + if err := r.Init(repo.FullNode); err != nil && err != repo.ErrRepoExists { return xerrors.Errorf("repo init error: %w", err) } diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go index 709d78d3a..a69cdd55d 100644 --- a/node/repo/fsrepo.go +++ b/node/repo/fsrepo.go @@ -65,7 +65,8 @@ var ErrRepoExists = xerrors.New("repo exists") // FsRepo is struct for repo, use NewFS to create type FsRepo struct { - path string + path string + configPath string } var _ Repo = &FsRepo{} @@ -78,10 +79,15 @@ func NewFS(path string) (*FsRepo, error) { } return &FsRepo{ - path: path, + path: path, + configPath: filepath.Join(path, fsConfig), }, nil } +func (fsr *FsRepo) SetConfigPath(cfgPath string) { + fsr.configPath = cfgPath +} + func (fsr *FsRepo) Exists() (bool, error) { _, err := os.Stat(filepath.Join(fsr.path, fsDatastore)) notexist := os.IsNotExist(err) @@ -115,9 +121,7 @@ func (fsr *FsRepo) Init(t RepoType) error { } func (fsr *FsRepo) initConfig(t RepoType) error { - cfgP := filepath.Join(fsr.path, fsConfig) - - _, err := os.Stat(cfgP) + _, err := os.Stat(fsr.configPath) if err == nil { // exists return nil @@ -125,7 +129,7 @@ func (fsr *FsRepo) initConfig(t RepoType) error { return err } - c, err := os.Create(cfgP) + c, err := os.Create(fsr.configPath) if err != nil { return err } @@ -215,16 +219,18 @@ func (fsr *FsRepo) Lock(repoType RepoType) (LockedRepo, error) { return nil, xerrors.Errorf("could not lock the repo: %w", err) } return &fsLockedRepo{ - path: fsr.path, - repoType: repoType, - closer: closer, + path: fsr.path, + configPath: fsr.configPath, + repoType: repoType, + closer: closer, }, nil } type fsLockedRepo struct { - path string - repoType RepoType - closer io.Closer + path string + configPath string + repoType RepoType + closer io.Closer ds map[string]datastore.Batching dsErr error @@ -277,7 +283,7 @@ func (fsr *fsLockedRepo) Config() (interface{}, error) { } func (fsr *fsLockedRepo) loadConfigFromDisk() (interface{}, error) { - return config.FromFile(fsr.join(fsConfig), defConfForType(fsr.repoType)) + return config.FromFile(fsr.configPath, defConfForType(fsr.repoType)) } func (fsr *fsLockedRepo) SetConfig(c func(interface{})) error { @@ -306,7 +312,7 @@ func (fsr *fsLockedRepo) SetConfig(c func(interface{})) error { } // write buffer of TOML bytes to config file - err = ioutil.WriteFile(fsr.join(fsConfig), buf.Bytes(), 0644) + err = ioutil.WriteFile(fsr.configPath, buf.Bytes(), 0644) if err != nil { return err }