Merge branch 'master' into releases

Aayush Rajasekaran 2021-04-27 04:11:17 -04:00
commit b79861c1ad
253 changed files with 20415 additions and 3808 deletions


@@ -284,11 +284,41 @@ jobs:
- install-deps
- prepare
- run: make calibnet
- run: mkdir linux-calibnet && mv lotus lotus-miner lotus-worker linux-calibnet
- run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
- persist_to_workspace:
root: "."
paths:
- linux-calibnet
- linux-calibrationnet
build-ntwk-butterfly:
description: |
Compile lotus binaries for the butterfly network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make butterflynet
- run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
- persist_to_workspace:
root: "."
paths:
- linux-butterflynet
build-ntwk-nerpa:
description: |
Compile lotus binaries for the nerpa network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make nerpanet
- run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
- persist_to_workspace:
root: "."
paths:
- linux-nerpanet
build-lotus-soup:
description: |
Compile `lotus-soup` Testground test plan
@@ -364,6 +394,15 @@ jobs:
command: |
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
chmod +x /usr/local/bin/jq
- run:
name: Install hwloc
command: |
mkdir ~/hwloc
curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
cd ~/hwloc
tar -xvzpf hwloc-2.4.1.tar.gz
cd hwloc-2.4.1
./configure && make && sudo make install
- restore_cache:
name: restore cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
@@ -405,7 +444,7 @@ jobs:
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: go generate ./...
- run: make type-gen
- run: git --no-pager diff
- run: git --no-pager diff --quiet
@@ -414,9 +453,19 @@ jobs:
steps:
- install-deps
- prepare
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make deps
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: git --no-pager diff
- run: diff ../pre-openrpc-full ../post-openrpc-full
- run: diff ../pre-openrpc-miner ../post-openrpc-miner
- run: diff ../pre-openrpc-worker ../post-openrpc-worker
- run: git --no-pager diff --quiet
lint: &lint
@@ -595,7 +644,7 @@ jobs:
docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
done
publish-packer:
publish-packer-mainnet:
description: build and push AWS AMI and DigitalOcean droplet.
executor:
name: packer/default
@@ -607,9 +656,42 @@ jobs:
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
publish-packer-calibrationnet:
description: build and push AWS AMI and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-calibnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
publish-packer-butterflynet:
description: build and push AWS AMI and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
publish-packer-nerpanet:
description: build and push AWS AMI and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
workflows:
version: 2.1
@@ -675,8 +757,6 @@ workflows:
test-suite-name: conformance-bleeding-edge
packages: "./conformance"
vectors-branch: master
- build-ntwk-calibration
- build-lotus-soup
- trigger-testplans:
filters:
branches:
@@ -690,6 +770,28 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-ntwk-calibration:
requires:
- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-ntwk-butterfly:
requires:
- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-ntwk-nerpa:
requires:
- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-lotus-soup
- build-macos:
requires:
- test-short
@@ -716,9 +818,18 @@ workflows:
path: .
repo: lotus-dev
tag: '${CIRCLE_SHA1:0:8}'
- publish-packer:
- publish-packer-mainnet:
requires:
- build-all
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- publish-packer-calibrationnet:
requires:
- build-ntwk-calibration
filters:
branches:
@@ -727,4 +838,23 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- publish-packer-butterflynet:
requires:
- build-ntwk-butterfly
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- publish-packer-nerpanet:
requires:
- build-ntwk-nerpa
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/

8 .github/CODEOWNERS vendored

@@ -8,9 +8,9 @@
## the PR before merging.
### Global owners.
* @magik6k @whyrusleeping @Kubuxu
* @magik6k @arajasek
### Conformance testing.
conformance/ @raulk
extern/test-vectors @raulk
cmd/tvx @raulk
conformance/ @ZenGround0
extern/test-vectors @ZenGround0
cmd/tvx @ZenGround0

65 .github/workflows/codeql-analysis.yml vendored Normal file

@@ -0,0 +1,65 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

2 .gitignore vendored

@@ -14,6 +14,8 @@
/lotus-pcr
/lotus-wallet
/lotus-keygen
/docgen-md
/docgen-openrpc
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build


@@ -16,6 +16,12 @@ linters:
- deadcode
- scopelint
# We don't want to skip builtin/
skip-dirs-use-default: false
skip-dirs:
- vendor$
- testdata$
- examples$
issues:
exclude:


@@ -1,6 +1,6 @@
# Lotus changelog
# 1.8.0 / 2021-04-05
# 1.8.0 / 2021-04-27
This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.


@@ -233,6 +233,13 @@ testground:
.PHONY: testground
BINS+=testground
tvx:
rm -f tvx
go build -o tvx ./cmd/tvx
.PHONY: tvx
BINS+=tvx
install-chainwatch: lotus-chainwatch
install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
@@ -318,19 +325,50 @@ dist-clean:
git submodule deinit --all -f
.PHONY: dist-clean
type-gen:
type-gen: api-gen
go run ./gen/main.go
go generate ./...
go generate -x ./...
goimports -w api/
method-gen:
method-gen: api-gen
(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
gen: type-gen method-gen
api-gen:
go run ./gen/api
goimports -w api
goimports -w api
.PHONY: api-gen
docsgen:
go run ./api/docgen "api/api_full.go" "FullNode" > documentation/en/api-methods.md
go run ./api/docgen "api/api_storage.go" "StorageMiner" > documentation/en/api-methods-miner.md
go run ./api/docgen "api/api_worker.go" "WorkerAPI" > documentation/en/api-methods-worker.md
docsgen: docsgen-md docsgen-openrpc
docsgen-md-bin: api-gen
go build $(GOFLAGS) -o docgen-md ./api/docgen/cmd
docsgen-openrpc-bin: api-gen
go build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
./docgen-md "api/v0api/full.go" "FullNode" "v0api" "./api/v0api" > documentation/en/api-v0-methods.md
docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker
docsgen-openrpc-full: docsgen-openrpc-bin
./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
docsgen-openrpc-storage: docsgen-openrpc-bin
./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
docsgen-openrpc-worker: docsgen-openrpc-bin
./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
gen: type-gen method-gen docsgen api-gen
.PHONY: gen
print-%:
@echo $*=$($*)

14 api/README.md Normal file

@@ -0,0 +1,14 @@
## Lotus API
This package contains all lotus API definitions. Interfaces defined here are
exposed as JsonRPC 2.0 endpoints by lotus programs.
### Versions
| File | Alias File | Interface | Exposed by | Version | HTTP Endpoint | Status | Docs
|------------------|-------------------|----------------|--------------------|---------|---------------|------------------------------|------
| `api_common.go` | `v0api/latest.go` | `Common` | lotus; lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods.md)
| `api_full.go` | `v1api/latest.go` | `FullNode` | lotus | v1 | `/rpc/v1` | Latest, **Work in progress** | [Methods](../documentation/en/api-v1-unstable-methods.md)
| `api_storage.go` | `v0api/latest.go` | `StorageMiner` | lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-miner.md)
| `api_worker.go` | `v0api/latest.go` | `Worker` | lotus-worker | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-worker.md)
| `v0api/full.go` | | `FullNode` | lotus | v0 | `/rpc/v0` | Stable | [Methods](../documentation/en/api-v0-methods.md)
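
As a quick orientation for the endpoints in this table (an illustrative sketch, not part of this commit): a v0 method can be invoked with a plain JSON-RPC 2.0 POST. The node address `127.0.0.1:1234` and the `LOTUS_TOKEN` variable below are assumptions made for the example only.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

// Minimal sketch: call Filecoin.Version on the stable v0 endpoint listed above.
func main() {
	payload := bytes.NewBufferString(`{"jsonrpc":"2.0","method":"Filecoin.Version","params":[],"id":1}`)
	req, err := http.NewRequest("POST", "http://127.0.0.1:1234/rpc/v0", payload)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Read-level methods require a JWT token; admin methods need an admin token.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("LOTUS_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON-RPC response with version information
}
```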


@@ -11,63 +11,79 @@ import (
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
apitypes "github.com/filecoin-project/lotus/api/types"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Common interface {
// MethodGroup: Auth
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin
// MethodGroup: Net
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
NetConnect(context.Context, peer.AddrInfo) error
NetAddrsListen(context.Context) (peer.AddrInfo, error)
NetDisconnect(context.Context, peer.ID) error
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
NetPubsubScores(context.Context) ([]PubsubScore, error)
NetAutoNatStatus(context.Context) (NatInfo, error)
NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error)
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the node's total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the node's bandwidth
// usage and current rate per peer
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the node's bandwidth
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error
NetBlockRemove(ctx context.Context, acl NetBlockList) error
NetBlockList(ctx context.Context) (NetBlockList, error)
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
// MethodGroup: Common
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error)
ID(context.Context) (peer.ID, error) //perm:read
// Version provides information about API provider
Version(context.Context) (APIVersion, error)
Version(context.Context) (APIVersion, error) //perm:read
LogList(context.Context) ([]string, error)
LogSetLevel(context.Context, string, string) error
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// trigger graceful shutdown
Shutdown(context.Context) error
Shutdown(context.Context) error //perm:admin
// Session returns a random UUID of api provider session
Session(context.Context) (uuid.UUID, error)
Session(context.Context) (uuid.UUID, error) //perm:read
Closing(context.Context) (<-chan struct{}, error)
Closing(context.Context) (<-chan struct{}, error) //perm:read
}
// APIVersion provides various build-time information
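
The `//perm:` annotations added to the `Common` interface above are consumed by `make gen`. A hedged sketch of the kind of proxy struct that generation produces (field set trimmed, names illustrative, details may differ from the real generated code): each method becomes a function field whose permission travels as a struct tag, which the RPC auth layer can check against the caller's token.

```go
// Illustrative sketch only; the real structs are produced by `make gen`,
// not written by hand.
package sketch

import (
	"context"

	"github.com/filecoin-project/go-jsonrpc/auth"
)

// CommonProxy mirrors the shape of a generated proxy: one function field per
// interface method, tagged with the permission parsed from the //perm: comment.
type CommonProxy struct {
	Internal struct {
		AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"`
		AuthNew    func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"`
		Shutdown   func(ctx context.Context) error                                    `perm:"admin"`
	}
}

// Calls forward to the tagged function fields, which the RPC layer fills in.
func (c *CommonProxy) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) {
	return c.Internal.AuthVerify(ctx, token)
}
```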


@@ -6,14 +6,12 @@ import (
"fmt"
"time"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
@@ -21,11 +19,12 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
@@ -40,6 +39,22 @@ type ChainIO interface {
ChainHasObj(context.Context, cid.Cid) (bool, error)
}
const LookbackNoLimit = abi.ChainEpoch(-1)
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
// you'll have to add those methods to interfaces in `api/v0api`
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
@@ -50,66 +65,78 @@ type FullNode interface {
// ChainNotify returns channel with chain head updates.
// First message is guaranteed to be of len == 1, and type == 'current'.
ChainNotify(context.Context) (<-chan []*HeadChange, error)
ChainNotify(context.Context) (<-chan []*HeadChange, error) //perm:read
// ChainHead returns the current head of the chain.
ChainHead(context.Context) (*types.TipSet, error)
ChainHead(context.Context) (*types.TipSet, error) //perm:read
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetBlock returns the block specified by the given CID.
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetBlockMessages returns messages stored in the specified block.
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
//
// Note: If there are multiple blocks in a tipset, it's likely that some
// messages will be duplicated. It's also possible for blocks in a tipset to have
// different messages from the same sender at the same nonce. When that happens,
// only the first message (in a block with lowest ticket) will be considered
// for execution
//
// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
//
// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
// Use ChainGetParentMessages, which will perform correct message deduplication
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) //perm:read
// ChainGetParentReceipts returns receipts for messages in parent tipset of
// the specified block.
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
// the specified block. The receipts in the list returned is one-to-one with the
// messages returned by a call to ChainGetParentMessages with the same blockCid.
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
// ChainGetParentMessages returns messages stored in parent tipset of the
// specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned.
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes.
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
// ChainDeleteObj deletes node referenced by the given CID
ChainDeleteObj(context.Context, cid.Cid) error
ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
// ChainHasObj checks if a given CID exists in the chain blockstore.
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
// ChainStatObj returns statistics about the graph referenced by 'obj'.
// If 'base' is also specified, then the returned stat will be a diff
// between the two objects.
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error)
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) //perm:read
// ChainSetHead forcefully sets current chain head. Use with caution.
ChainSetHead(context.Context, types.TipSetKey) error
ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
// ChainGetGenesis returns the genesis tipset.
ChainGetGenesis(context.Context) (*types.TipSet, error)
ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
// ChainTipSetWeight computes weight for the specified tipset.
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
ChainGetNode(ctx context.Context, p string) (*IpldObject, error) //perm:read
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore.
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
@@ -124,14 +151,14 @@ type FullNode interface {
// tRR
//```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read
// ChainExport returns a stream of bytes with CAR dump of chain data.
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
// If oldmsgskip is set, messages from before the requested roots are also not included.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
@@ -139,74 +166,74 @@ type FullNode interface {
// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error)
BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
// GasEstimateFeeCap estimates gas fee cap
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error)
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateGasLimit estimates gas used by the message and returns it.
// It fails if message fails to execute.
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error)
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateMessageGas estimates gas values for unset message gas fields
GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error)
GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
// MethodGroup: Sync
// The Sync method group contains methods for interacting with and
// observing the lotus sync service.
// SyncState returns the current status of the lotus sync system.
SyncState(context.Context) (*SyncState, error)
SyncState(context.Context) (*SyncState, error) //perm:read
// SyncSubmitBlock can be used to submit a newly created block to the
// network through this node.
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution.
SyncMarkBad(ctx context.Context, bcid cid.Cid) error
SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
SyncUnmarkAllBad(ctx context.Context) error
SyncUnmarkAllBad(ctx context.Context) error //perm:admin
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
// SyncValidateTipset indicates whether the provided tipset is valid or not
SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error)
SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages.
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
// MpoolSelect returns a list of pending messages for inclusion in the next block
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error)
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error)
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
@@ -214,34 +241,34 @@ type FullNode interface {
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error)
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) //perm:sign
// MpoolBatchPush batch pushes a signed message to mempool.
MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushMessage batch pushes an unsigned message to mempool.
MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error)
MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error)
MpoolSub(context.Context) (<-chan MpoolUpdate, error)
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool
MpoolClear(context.Context, bool) error
MpoolClear(context.Context, bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
MpoolSetConfig(context.Context, *types.MpoolConfig) error
MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
// MethodGroup: Miner
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error)
MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error)
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) //perm:read
MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) //perm:write
// // UX ?
@@ -250,32 +277,32 @@ type FullNode interface {
// WalletNew creates a new address in the wallet with the given sigType.
// Available key types: bls, secp256k1, secp256k1-ledger
// Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
WalletNew(context.Context, types.KeyType) (address.Address, error)
WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
// WalletHas indicates whether the given address is in the wallet.
WalletHas(context.Context, address.Address) (bool, error)
WalletHas(context.Context, address.Address) (bool, error) //perm:write
// WalletList lists all the addresses in the wallet.
WalletList(context.Context) ([]address.Address, error)
WalletList(context.Context) ([]address.Address, error) //perm:write
// WalletBalance returns the balance of the given address at the current head of the chain.
WalletBalance(context.Context, address.Address) (types.BigInt, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
// WalletSign signs the given bytes using the given address.
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error)
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
// WalletSignMessage signs the given message using the given address.
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error)
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error)
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error
WalletDelete(context.Context, address.Address) error //perm:admin
// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
WalletValidateAddress(context.Context, string) (address.Address, error)
WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
// Other
@@ -284,56 +311,59 @@ type FullNode interface {
// retrieval markets as a client
// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error)
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]DealInfo, error)
ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error)
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error)
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error)
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error)
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error)
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error)
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
// ClientUnimport removes references to the specified file from filestore
//ClientUnimport(path string)
// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]Import, error)
ClientListImports(ctx context.Context) ([]Import, error) //perm:write
//ClientListAsks() []Ask
@@ -347,149 +377,217 @@ type FullNode interface {
// StateCall applies the message to the tipset's parent state. The
// message is not applied on-top-of the messages in the passed-in
// tipset.
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
// If no tipset key is provided, the appropriate tipset is looked up.
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
//
// If a tipset key is provided, and a replacing message is found on chain,
// the method will return an error saying that the message wasn't found
//
// If no tipset key is provided, the appropriate tipset is looked up, and if
// the message was gas-repriced, the on-chain message will be replayed - in
// that case the returned InvocResult.MsgCid will not match the Cid param
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID, any of Gas values, and
// different signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) //perm:read
// StateGetActor returns the indicated actor's nonce and balance.
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
// StateReadState returns the indicated actor's state.
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error)
StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
// StateNetworkName returns the name of the network the node is synced to
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
// StateMinerPower returns the power of the indicated miner
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) //perm:read
// StateMinerInfo returns info about the indicated miner
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
// StateMinerDeadlines returns all the proving deadlines for the given miner
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) //perm:read
// StateMinerPartitions returns all partitions in the specified deadline
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error)
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) //perm:read
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) //perm:read
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerSectorAllocated checks if a sector is allocated
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error)
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
// StateSectorExpiration returns epoch at which given sector will expire
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error)
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
// StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*MsgLookup, error)
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
// message arrives on chain, and gets to the indicated confidence depth.
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
// StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
// StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
// set to true, both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID, any of Gas values, and
// different signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
// StateWaitMsg looks back up to limit epochs in the chain for a message.
// If not found, it blocks until the message arrives on chain, and gets to the
// indicated confidence depth.
StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*MsgLookup, error)
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
// set to true, both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID, any of Gas values, and
// different signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateListActors returns the addresses of every actor in the state
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error)
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error)
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) //perm:read
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error)
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) //perm:read
// StateMarketDeals returns information about every deal in the Storage Market
StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error)
StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) //perm:read
// StateMarketStorageDeal returns information about the indicated deal
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error)
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
// StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error)
// StateGetReceipt returns the message receipt for the given message
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) //perm:read
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
//
// When called, StateCompute will:
// - Load the provided tipset, or use the current chain head if not provided
// - Compute the tipset state of the provided tipset on top of the parent state
// - (note that this step runs before vmheight is applied to the execution)
// - Execute state upgrade if any were scheduled at the epoch, or in null
// blocks preceding the tipset
// - Call the cron actor on null blocks preceding the tipset
// - For each block in the tipset
//   - Apply messages in the block
// - Award block reward by calling the reward actor
// - Call the cron actor for the current epoch
// - If the specified vmheight is higher than the current epoch, apply any
// needed state upgrades to the state
// - Apply the specified messages to the state
//
// The vmheight parameter sets VM execution epoch, and can be used to simulate
// message execution in different network versions. If the specified vmheight
// epoch is higher than the epoch of the specified tipset, any state upgrades
// until the vmheight will be executed on the state before applying messages
// specified by the user.
//
// Note that the initial tipset state computation is not affected by the
// vmheight parameter - only the messages in the `apply` set are
//
// If the caller wants to simply compute the state, vmheight should be set to
// the epoch of the specified tipset.
//
// Messages in the `apply` parameter must have the correct nonces and gas
// values set.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) //perm:read
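//
// Illustrative sketch (not part of the interface): replaying a message on top of the
// current head without simulating a different network version, assuming `node` is a
// FullNode client and `msg` is a *types.Message with correct nonce and gas values set:
//
//	head, err := node.ChainHead(ctx)
//	if err != nil {
//		return err
//	}
//	// vmheight == head epoch, so no extra state upgrades run before the messages
//	res, err := node.StateCompute(ctx, head.Height(), []*types.Message{msg}, head.Key())
//	if err != nil {
//		return err
//	}
//	fmt.Println(res.Root) // state root after applying the messages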
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error)
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) //perm:read
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
// This is not used anywhere in the protocol itself, and is only for external consumption.
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
// This is the value reported by the runtime interface to actors code.
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error)
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) //perm:read
// StateNetworkVersion returns the network version at the given tipset
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetVestingSchedule returns the vesting details of a given multisig.
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error)
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) //perm:read
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: <multisig address>, <start tipset>, <end tipset>
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetPending returns pending transactions for the given multisig
// wallet. Once pending transactions are fully approved, they will no longer
// appear here.
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>,
// <initial balance>, <sender address of the create msg>, <gas price>
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error)
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
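//
// Illustrative sketch (not part of the interface): creating a 2-of-3 multisig with no
// vesting period, assuming `node` is a FullNode client, `signers` holds three addresses
// and `sender` is a local wallet address funding the create message:
//
//	createCid, err := node.MsigCreate(ctx, 2, signers, abi.ChainEpoch(0),
//		types.FromFil(10), sender, types.NewInt(0))
//	if err != nil {
//		return err
//	}
//	// createCid is the CID of the pending create message; wait for it with StateWaitMsg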
// MsigPropose proposes a multisig message
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error)
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the
@ -497,80 +595,80 @@ type FullNode interface {
// exactly the transaction you think you are.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
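//
// Illustrative sketch (not part of the interface): approving pending proposal #3 only if
// it matches a plain 1 FIL transfer to `recipient`, assuming `node` is a FullNode client
// and msig/proposer/recipient/approver are addresses known to the caller:
//
//	approveCid, err := node.MsigApproveTxnHash(ctx, msig, 3, proposer, recipient,
//		types.FromFil(1), approver, 0, nil) // method 0 (Send), no params
//	if err != nil {
//		return err
//	}
//	// the multisig actor re-derives the proposal hash from these values, so the
//	// approval only applies to the transaction the caller believes it is approving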
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one
// along with the address removal.
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign
// MarketAddBalance adds funds to the market actor
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error)
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketGetReserved gets the amount of funds that are currently reserved for the address
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error)
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
// MarketReserveFunds reserves funds for a deal
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error)
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
// MarketWithdraw withdraws unlocked funds from the market actor
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error)
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
PaychList(context.Context) ([]address.Address, error)
PaychStatus(context.Context, address.Address) (*PaychStatus, error)
PaychSettle(context.Context, address.Address) (cid.Cid, error)
PaychCollect(context.Context, address.Address) (cid.Cid, error)
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error)
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error)
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error)
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error)
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
PaychList(context.Context) ([]address.Address, error) //perm:read
PaychStatus(context.Context, address.Address) (*PaychStatus, error) //perm:read
PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) //perm:sign
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) //perm:sign
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error
CreateBackup(ctx context.Context, fpath string) error //perm:admin
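//
// Illustrative sketch (not part of the interface): with the daemon started as
// `LOTUS_BACKUP_BASE_PATH=/backups lotus daemon`, a client could request a backup
// under that base path (the path below is hypothetical):
//
//	if err := node.CreateBackup(ctx, "/backups/lotus-node-backup.cbor"); err != nil {
//		return err
//	}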
}
type FileRef struct {
@ -605,6 +703,7 @@ type DealInfo struct {
ProposalCid cid.Cid
State storagemarket.StorageDealStatus
Message string // more information about deal state, particularly errors
DealStages *storagemarket.DealStages
Provider address.Address
DataRef *storagemarket.DataRef
@ -760,7 +859,7 @@ func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
Client: client,
Miner: o.Miner,
MinerPeer: o.MinerPeer,
MinerPeer: &o.MinerPeer,
}
}
@ -779,6 +878,8 @@ type RetrievalOrder struct {
Root cid.Cid
Piece *cid.Cid
Size uint64
LocalStore *multistore.StoreID // if specified, get data from local store
// TODO: support offset
Total types.BigInt
UnsealPrice types.BigInt
@ -786,7 +887,7 @@ type RetrievalOrder struct {
PaymentIntervalIncrease uint64
Client address.Address
Miner address.Address
MinerPeer retrievalmarket.RetrievalPeer
MinerPeer *retrievalmarket.RetrievalPeer
}
type InvocResult struct {
@ -912,11 +1013,12 @@ type DealCollateralBounds struct {
}
type CirculatingSupply struct {
FilVested abi.TokenAmount
FilMined abi.TokenAmount
FilBurnt abi.TokenAmount
FilLocked abi.TokenAmount
FilCirculating abi.TokenAmount
FilVested abi.TokenAmount
FilMined abi.TokenAmount
FilBurnt abi.TokenAmount
FilLocked abi.TokenAmount
FilCirculating abi.TokenAmount
FilReserveDisbursed abi.TokenAmount
}
type MiningBaseInfo struct {

View File

@ -8,13 +8,27 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
)
type GatewayAPI interface {
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
// you'll have to add those methods to interfaces in `api/v0api`
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Gateway interface {
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
@ -31,7 +45,6 @@ type GatewayAPI interface {
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
@ -39,9 +52,10 @@ type GatewayAPI interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*MsgLookup, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error)
}

View File

@ -5,6 +5,8 @@ import (
"context"
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
@ -24,128 +26,165 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
ActorAddress(context.Context) (address.Address, error)
ActorAddress(context.Context) (address.Address, error) //perm:read
ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error)
ActorAddressConfig(ctx context.Context) (AddressConfig, error)
ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read
MiningBase(context.Context) (*types.TipSet, error)
MiningBase(context.Context) (*types.TipSet, error) //perm:read
// Temp api for testing
PledgeSector(context.Context) (abi.SectorID, error)
PledgeSector(context.Context) (abi.SectorID, error) //perm:write
// Get the status of a given sector by ID
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error)
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
// List all staged sectors
SectorsList(context.Context) ([]abi.SectorNumber, error)
SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read
// Get summary info of sectors
SectorsSummary(ctx context.Context) (map[SectorState]int, error)
SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read
// List sectors in particular states
SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error)
SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read
SectorsRefs(context.Context) (map[string][]SealedRef, error)
SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read
// SectorStartSealing can be called on sectors in Empty or WaitDeals states
// to trigger sealing early
SectorStartSealing(context.Context, abi.SectorNumber) error
SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write
// SectorSetSealDelay sets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorSetSealDelay(context.Context, time.Duration) error
SectorSetSealDelay(context.Context, time.Duration) error //perm:write
// SectorGetSealDelay gets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorGetSealDelay(context.Context) (time.Duration, error)
SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read
// SectorSetExpectedSealDuration sets the expected time for a sector to seal
SectorSetExpectedSealDuration(context.Context, time.Duration) error
SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin
// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
SectorRemove(context.Context, abi.SectorNumber) error
SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
// automatically removes it from storage
SectorTerminate(context.Context, abi.SectorNumber) error
SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
// Returns nil if the message wasn't sent
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error)
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
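//
// Illustrative sketch (not part of the interface): terminating a sector on-chain and
// immediately flushing the batched terminate message, assuming `m` is a StorageMiner
// client and `num` is the sector number:
//
//	if err := m.SectorTerminate(ctx, num); err != nil {
//		return err
//	}
//	msgCid, err := m.SectorTerminateFlush(ctx)
//	if err != nil {
//		return err
//	}
//	if msgCid != nil {
//		// a terminate message was sent; it can be waited on with StateWaitMsg
//	}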
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error)
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
StorageLocal(ctx context.Context) (map[stores.ID]string, error)
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error)
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
// WorkerConnect tells the node to connect to a worker's RPC endpoint
WorkerConnect(context.Context, string) error
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error)
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)
storiface.WorkerReturn
WorkerConnect(context.Context, string) error //perm:admin retry:true
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
//storiface.WorkerReturn
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
// SealingSchedDiag dumps internal sealing scheduler state
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error)
SealingAbort(ctx context.Context, call storiface.CallID) error
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
stores.SectorIndex
//stores.SectorIndex
StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin
StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin
StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin
StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
MarketListDeals(ctx context.Context) ([]MarketDeal, error)
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error)
MarketPublishPendingDeals(ctx context.Context) error
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
DealsList(ctx context.Context) ([]MarketDeal, error)
DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error
DealsConsiderVerifiedStorageDeals(context.Context) (bool, error)
DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error)
DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
DealsList(ctx context.Context) ([]MarketDeal, error) //perm:admin
DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
StorageAddLocal(ctx context.Context, path string) error
StorageAddLocal(ctx context.Context, path string) error //perm:admin
PiecesListPieces(ctx context.Context) ([]cid.Cid, error)
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error)
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)
PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus-miner is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error
CreateBackup(ctx context.Context, fpath string) error //perm:admin
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error)
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
}
var _ storiface.WorkerReturn = *new(StorageMiner)
var _ stores.SectorIndex = *new(StorageMiner)
type SealRes struct {
Err string
GoErr error `json:"-"`

View File

@ -37,6 +37,18 @@ func TestDoesntDependOnFFI(t *testing.T) {
}
}
func TestDoesntDependOnBuild(t *testing.T) {
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
if err != nil {
t.Fatal(err)
}
for _, pkg := range strings.Fields(string(deps)) {
if pkg == "github.com/filecoin-project/build" {
t.Fatal("api depends on filecoin-ffi")
}
}
}
func TestReturnTypes(t *testing.T) {
errType := reflect.TypeOf(new(error)).Elem()
bareIface := reflect.TypeOf(new(interface{})).Elem()
@ -99,5 +111,11 @@ func TestReturnTypes(t *testing.T) {
t.Run("common", tst(new(Common)))
t.Run("full", tst(new(FullNode)))
t.Run("miner", tst(new(StorageMiner)))
t.Run("worker", tst(new(WorkerAPI)))
t.Run("worker", tst(new(Worker)))
}
func TestPermTags(t *testing.T) {
_ = PermissionedFullAPI(&FullNodeStruct{})
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
_ = PermissionedWorkerAPI(&WorkerStruct{})
}

View File

@ -34,7 +34,7 @@ type MsgMeta struct {
Extra []byte
}
type WalletAPI interface {
type Wallet interface {
WalletNew(context.Context, types.KeyType) (address.Address, error)
WalletHas(context.Context, address.Address) (bool, error)
WalletList(context.Context) ([]address.Address, error)

View File

@ -2,46 +2,73 @@ package api
import (
"context"
"io"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/specs-storage/storage"
)
type WorkerAPI interface {
Version(context.Context) (Version, error)
// TODO: Info() (name, ...) ?
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight
Paths(context.Context) ([]stores.StoragePath, error)
Info(context.Context) (storiface.WorkerInfo, error)
type Worker interface {
Version(context.Context) (Version, error) //perm:admin
storiface.WorkerCalls
// TaskType -> Weight
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
Paths(context.Context) ([]stores.StoragePath, error) //perm:admin
Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error
TaskEnable(ctx context.Context, tt sealtasks.TaskType) error
// storiface.WorkerCalls
AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin
SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) //perm:admin
SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) //perm:admin
Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
// Storage / Other
Remove(ctx context.Context, sector abi.SectorID) error
Remove(ctx context.Context, sector abi.SectorID) error //perm:admin
StorageAddLocal(ctx context.Context, path string) error
StorageAddLocal(ctx context.Context, path string) error //perm:admin
// SetEnabled marks the worker as enabled/disabled. Note that this setting
// may take a few seconds to propagate to the task scheduler
SetEnabled(ctx context.Context, enabled bool) error
SetEnabled(ctx context.Context, enabled bool) error //perm:admin
Enabled(ctx context.Context) (bool, error)
Enabled(ctx context.Context) (bool, error) //perm:admin
// WaitQuiet blocks until there are no tasks running
WaitQuiet(ctx context.Context) error
WaitQuiet(ctx context.Context) error //perm:admin
// ProcessSession returns the UUID of the worker session, generated randomly when the
// worker process starts
ProcessSession(context.Context) (uuid.UUID, error)
ProcessSession(context.Context) (uuid.UUID, error) //perm:admin
// Like ProcessSession, but returns an error when the worker is disabled
Session(context.Context) (uuid.UUID, error)
Session(context.Context) (uuid.UUID, error) //perm:admin
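//
// Illustrative sketch (not part of the interface): the miner can detect a worker restart
// by comparing session UUIDs over time, assuming `w` is a Worker client:
//
//	initial, err := w.Session(ctx)
//	if err != nil {
//		return err
//	}
//	// ... later ...
//	current, err := w.Session(ctx)
//	if err != nil {
//		return err
//	}
//	if current != initial {
//		// the worker process restarted; any in-flight work it held is gone
//	}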
}
var _ storiface.WorkerCalls = *new(Worker)

File diff suppressed because it is too large Load Diff

View File

@ -1,9 +0,0 @@
package apistruct
import "testing"
func TestPermTags(t *testing.T) {
_ = PermissionedFullAPI(&FullNodeStruct{})
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
_ = PermissionedWorkerAPI(&WorkerStruct{})
}

View File

@ -10,13 +10,14 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/lib/rpcenc"
)
// NewCommonRPC creates a new http jsonrpc client.
func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
var res apistruct.CommonStruct
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
var res v0api.CommonStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
@ -27,9 +28,9 @@ func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (
return &res, closer, err
}
// NewFullNodeRPC creates a new http jsonrpc client.
func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
var res apistruct.FullNodeStruct
// NewFullNodeRPCV0 creates a new http jsonrpc client.
func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
var res v0api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
@ -39,9 +40,21 @@ func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header)
return &res, closer, err
}
// NewStorageMinerRPC creates a new http jsonrpc client for miner
func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
var res apistruct.StorageMinerStruct
// NewFullNodeRPCV1 creates a new http jsonrpc client.
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
var res v1api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
}, requestHeader)
return &res, closer, err
}
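// Illustrative sketch (not part of this file): dialing a local node from another package
// with a bearer token, assuming the listen address and token are known to the caller:
//
//	header := http.Header{"Authorization": []string{"Bearer " + token}}
//	node, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", header)
//	if err != nil {
//		return err
//	}
//	defer closer()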
// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
@ -54,7 +67,7 @@ func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Hea
return &res, closer, err
}
func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Worker, jsonrpc.ClientCloser, error) {
u, err := url.Parse(addr)
if err != nil {
return nil, nil, err
@ -69,7 +82,7 @@ func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (
u.Path = path.Join(u.Path, "../streams/v0/push")
var res apistruct.WorkerStruct
var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
@ -83,9 +96,9 @@ func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (
return &res, closer, err
}
// NewGatewayRPC creates a new http jsonrpc client for a gateway node.
func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.GatewayAPI, jsonrpc.ClientCloser, error) {
var res apistruct.GatewayStruct
// NewGatewayRPCV1 creates a new http jsonrpc client for a gateway node.
func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
var res api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
@ -97,8 +110,22 @@ func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header,
return &res, closer, err
}
func NewWalletRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WalletAPI, jsonrpc.ClientCloser, error) {
var res apistruct.WalletStruct
// NewGatewayRPCV0 creates a new http jsonrpc client for a gateway node.
func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
var res v0api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
requestHeader,
opts...,
)
return &res, closer, err
}
func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
var res api.WalletStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,

View File

@ -0,0 +1,74 @@
package main
import (
"compress/gzip"
"encoding/json"
"io"
"log"
"os"
"github.com/filecoin-project/lotus/api/docgen"
docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
)
/*
main defines a small program that writes an OpenRPC document describing
a Lotus API to stdout.
If the first argument is "miner", the document will describe the StorageMiner API.
If not (no, or any other args), the document will describe the Full API.
Use:
go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"]
With gzip compression: a '-gzip' flag is made available as an optional final argument. Note that position matters.
go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] -gzip
*/
func main() {
Comments, GroupDocs := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)
i, _, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
doc.RegisterReceiverName("Filecoin", i)
out, err := doc.Discover()
if err != nil {
log.Fatalln(err)
}
var jsonOut []byte
var writer io.WriteCloser
// Use os.Args to handle a somewhat hacky flag for the gzip option.
// Could use the flag package to handle this more cleanly, but that requires changes elsewhere
// whose scope isn't warranted by this one use case, which will usually be run
// programmatically anyway.
if len(os.Args) > 5 && os.Args[5] == "-gzip" {
jsonOut, err = json.Marshal(out)
if err != nil {
log.Fatalln(err)
}
writer = gzip.NewWriter(os.Stdout)
} else {
jsonOut, err = json.MarshalIndent(out, "", " ")
if err != nil {
log.Fatalln(err)
}
writer = os.Stdout
}
_, err = writer.Write(jsonOut)
if err != nil {
log.Fatalln(err)
}
err = writer.Close()
if err != nil {
log.Fatalln(err)
}
}

View File

@ -0,0 +1,161 @@
package docgenopenrpc
import (
"encoding/json"
"go/ast"
"net"
"reflect"
"github.com/alecthomas/jsonschema"
go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect"
"github.com/filecoin-project/lotus/api/docgen"
"github.com/filecoin-project/lotus/build"
"github.com/ipfs/go-cid"
meta_schema "github.com/open-rpc/meta-schema"
)
// schemaDictEntry represents a type association passed to the jsonschema reflector.
type schemaDictEntry struct {
example interface{}
rawJson string
}
const integerD = `{
"title": "number",
"type": "number",
"description": "Number is a number"
}`
const cidCidD = `{"title": "Content Identifier", "type": "string", "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash."}`
func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type {
unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type {
var js jsonschema.Type
err := json.Unmarshal([]byte(input), &js)
if err != nil {
panic(err)
}
return &js
}
if ty.Kind() == reflect.Ptr {
ty = ty.Elem()
}
if ty == reflect.TypeOf((*interface{})(nil)).Elem() {
return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")}
}
// Second, handle other types.
// Use a slice instead of a map because it preserves order, as a logic safeguard/fallback.
dict := []schemaDictEntry{
{cid.Cid{}, cidCidD},
}
for _, d := range dict {
if reflect.TypeOf(d.example) == ty {
tt := unmarshalJSONToJSONSchemaType(d.rawJson)
return tt
}
}
// Handle primitive types in case there are generic cases
// specific to our services.
switch ty.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// Return all integer types as the hex representation integer schema.
ret := unmarshalJSONToJSONSchemaType(integerD)
return ret
case reflect.Uintptr:
return &jsonschema.Type{Type: "number", Title: "uintptr-title"}
case reflect.Struct:
case reflect.Map:
case reflect.Slice, reflect.Array:
case reflect.Float32, reflect.Float64:
case reflect.Bool:
case reflect.String:
case reflect.Ptr, reflect.Interface:
default:
}
return nil
}
// NewLotusOpenRPCDocument defines application-specific documentation and configuration for its OpenRPC document.
func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_reflect.Document {
d := &go_openrpc_reflect.Document{}
// Register "Meta" document fields.
// These include getters for
// - Servers object
// - Info object
// - ExternalDocs object
//
// These objects represent server-specific data that cannot be
// reflected.
d.WithMeta(&go_openrpc_reflect.MetaT{
GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) {
return func(listeners []net.Listener) (*meta_schema.Servers, error) {
return nil, nil
}
},
GetInfoFn: func() (info *meta_schema.InfoObject) {
info = &meta_schema.InfoObject{}
title := "Lotus RPC API"
info.Title = (*meta_schema.InfoObjectProperties)(&title)
version := build.BuildVersion
info.Version = (*meta_schema.InfoObjectVersion)(&version)
return info
},
GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) {
return nil // FIXME
},
})
// Use a provided Ethereum default configuration as a base.
appReflector := &go_openrpc_reflect.EthereumReflectorT{}
// Install overrides for the json schema->type map fn used by the jsonschema reflect package.
appReflector.FnSchemaTypeMap = func() func(ty reflect.Type) *jsonschema.Type {
return OpenRPCSchemaTypeMapper
}
appReflector.FnIsMethodEligible = func(m reflect.Method) bool {
for i := 0; i < m.Func.Type().NumOut(); i++ {
if m.Func.Type().Out(i).Kind() == reflect.Chan {
return false
}
}
return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m)
}
appReflector.FnGetMethodName = func(moduleName string, r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
if m.Name == "ID" {
return moduleName + "_ID", nil
}
if moduleName == "rpc" && m.Name == "Discover" {
return "rpc.discover", nil
}
return moduleName + "." + m.Name, nil
}
appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
if v, ok := Comments[m.Name]; ok {
return v, nil
}
return "", nil // noComment
}
appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) {
v := docgen.ExampleValue("unknown", ty, ty) // This isn't ideal, but seems to work well enough.
return &meta_schema.Examples{
meta_schema.AlwaysTrue(v),
}, nil
}
// Finally, register the configured reflector to the document.
d.WithReflector(appReflector)
return d
}

api/docgen/cmd/docgen.go Normal file
View File

@ -0,0 +1,116 @@
package main
import (
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"github.com/filecoin-project/lotus/api/docgen"
)
func main() {
comments, groupComments := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
groups := make(map[string]*docgen.MethodGroup)
_, t, permStruct, commonPermStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
groupName := docgen.MethodGroupFromName(m.Name)
g, ok := groups[groupName]
if !ok {
g = new(docgen.MethodGroup)
g.Header = groupComments[groupName]
g.GroupName = groupName
groups[groupName] = g
}
var args []interface{}
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, docgen.ExampleValue(m.Name, inp, nil))
}
v, err := json.MarshalIndent(args, "", " ")
if err != nil {
panic(err)
}
outv := docgen.ExampleValue(m.Name, ft.Out(0), nil)
ov, err := json.MarshalIndent(outv, "", " ")
if err != nil {
panic(err)
}
g.Methods = append(g.Methods, &docgen.Method{
Name: m.Name,
Comment: comments[m.Name],
InputExample: string(v),
ResponseExample: string(ov),
})
}
var groupslice []*docgen.MethodGroup
for _, g := range groups {
groupslice = append(groupslice, g)
}
sort.Slice(groupslice, func(i, j int) bool {
return groupslice[i].GroupName < groupslice[j].GroupName
})
fmt.Printf("# Groups\n")
for _, g := range groupslice {
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
for _, method := range g.Methods {
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
}
}
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName)
fmt.Printf("%s\n\n", g.Header)
sort.Slice(g.Methods, func(i, j int) bool {
return g.Methods[i].Name < g.Methods[j].Name
})
for _, m := range g.Methods {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
meth, ok := permStruct.FieldByName(m.Name)
if !ok {
meth, ok = commonPermStruct.FieldByName(m.Name)
if !ok {
panic("no perms for method: " + m.Name)
}
}
perms := meth.Tag.Get("perm")
fmt.Printf("Perms: %s\n\n", perms)
if strings.Count(m.InputExample, "\n") > 0 {
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
} else {
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
}
if strings.Count(m.ResponseExample, "\n") > 0 {
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
} else {
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
}
}
}
}

View File

@ -1,18 +1,18 @@
package main
package docgen
import (
"encoding/json"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"time"
"unicode"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
@ -23,8 +23,6 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
@ -36,7 +34,8 @@ import (
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
@ -89,6 +88,8 @@ func init() {
addExample(pid)
addExample(&pid)
multistoreIDExample := multistore.StoreID(50)
addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
@ -113,12 +114,13 @@ func init() {
addExample(network.Connected)
addExample(dtypes.NetworkName("lotus"))
addExample(api.SyncStateStage(1))
addExample(api.FullAPIVersion)
addExample(api.FullAPIVersion1)
addExample(api.PCHInbound)
addExample(time.Minute)
addExample(datatransfer.TransferID(3))
addExample(datatransfer.Ongoing)
addExample(multistore.StoreID(50))
addExample(multistoreIDExample)
addExample(&multistoreIDExample)
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(network.ReachabilityPublic)
@ -126,17 +128,17 @@ func init() {
addExample(map[string]int{"name": 42})
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
addExample(&types.ExecutionTrace{
Msg: exampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
})
addExample(map[string]types.Actor{
"t01236": exampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{
"t026363": exampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
"t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
})
addExample(map[string]api.MarketBalance{
"t026363": exampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
"t026363": ExampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
})
addExample(map[string]*pubsub.TopicScoreSnapshot{
"/blocks": {
@ -251,9 +253,53 @@ func init() {
sealtasks.TTPreCommit2: {},
})
addExample(sealtasks.TTCommit2)
addExample(apitypes.OpenRPCDocument{
"openrpc": "1.2.6",
"info": map[string]interface{}{
"title": "Lotus RPC API",
"version": "1.2.1/generated=2020-11-22T08:22:42-06:00",
},
"methods": []interface{}{}},
)
}
func exampleValue(method string, t, parent reflect.Type) interface{} {
func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) {
switch pkg {
case "api": // latest
switch name {
case "FullNode":
i = &api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = reflect.TypeOf(api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
case "StorageMiner":
i = &api.StorageMinerStruct{}
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = reflect.TypeOf(api.StorageMinerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
case "Worker":
i = &api.WorkerStruct{}
t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
permStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
default:
panic("unknown type")
}
case "v0api":
switch name {
case "FullNode":
i = v0api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
permStruct = reflect.TypeOf(v0api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(v0api.CommonStruct{}.Internal)
default:
panic("unknown type")
}
}
return
}
func ExampleValue(method string, t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t]
if ok {
return v
@ -262,10 +308,10 @@ func exampleValue(method string, t, parent reflect.Type) interface{} {
switch t.Kind() {
case reflect.Slice:
out := reflect.New(t).Elem()
reflect.Append(out, reflect.ValueOf(exampleValue(method, t.Elem(), t)))
reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
return out.Interface()
case reflect.Chan:
return exampleValue(method, t.Elem(), nil)
return ExampleValue(method, t.Elem(), nil)
case reflect.Struct:
es := exampleStruct(method, t, parent)
v := reflect.ValueOf(es).Elem().Interface()
@ -274,7 +320,7 @@ func exampleValue(method string, t, parent reflect.Type) interface{} {
case reflect.Array:
out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ {
out.Index(i).Set(reflect.ValueOf(exampleValue(method, t.Elem(), t)))
out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
}
return out.Interface()
@ -299,7 +345,7 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} {
continue
}
if strings.Title(f.Name) == f.Name {
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(method, f.Type, t)))
ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
}
}
@ -331,32 +377,43 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
const noComment = "There are not yet any comments for this method."
const NoComment = "There are not yet any comments for this method."
func parseApiASTInfo(apiFile, iface string) (map[string]string, map[string]string) { //nolint:golint
func ParseApiASTInfo(apiFile, iface, pkg, dir string) (comments map[string]string, groupDocs map[string]string) { //nolint:golint
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
apiDir, err := filepath.Abs(dir)
if err != nil {
fmt.Println("./api filepath absolute error: ", err)
return
}
apiFile, err = filepath.Abs(apiFile)
if err != nil {
fmt.Println("filepath absolute error: ", err, "file:", apiFile)
return
}
pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments)
if err != nil {
fmt.Println("parse error: ", err)
return
}
ap := pkgs["api"]
ap := pkgs[pkg]
f := ap.Files[apiFile]
cmap := ast.NewCommentMap(fset, f, f.Comments)
v := &Visitor{iface, make(map[string]ast.Node)}
ast.Walk(v, pkgs["api"])
ast.Walk(v, ap)
groupDocs := make(map[string]string)
out := make(map[string]string)
comments = make(map[string]string)
groupDocs = make(map[string]string)
for mn, node := range v.Methods {
cs := cmap.Filter(node).Comments()
if len(cs) == 0 {
out[mn] = noComment
filteredComments := cmap.Filter(node).Comments()
if len(filteredComments) == 0 {
comments[mn] = NoComment
} else {
for _, c := range cs {
for _, c := range filteredComments {
if strings.HasPrefix(c.Text(), "MethodGroup:") {
parts := strings.Split(c.Text(), "\n")
groupName := strings.TrimSpace(parts[0][12:])
@ -367,15 +424,19 @@ func parseApiASTInfo(apiFile, iface string) (map[string]string, map[string]strin
}
}
last := cs[len(cs)-1].Text()
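// When more than one comment group is attached to a method, take the second-to-last
// one: the last group is presumably the trailing //perm:... tag rather than the
// human-readable doc comment.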
l := len(filteredComments) - 1
if len(filteredComments) > 1 {
l = len(filteredComments) - 2
}
last := filteredComments[l].Text()
if !strings.HasPrefix(last, "MethodGroup:") {
out[mn] = last
comments[mn] = last
} else {
out[mn] = noComment
comments[mn] = NoComment
}
}
}
return out, groupDocs
return comments, groupDocs
}
type MethodGroup struct {
@ -391,7 +452,7 @@ type Method struct {
ResponseExample string
}
func methodGroupFromName(mn string) string {
func MethodGroupFromName(mn string) string {
i := strings.IndexFunc(mn[1:], func(r rune) bool {
return unicode.IsUpper(r)
})
@ -400,126 +461,3 @@ func methodGroupFromName(mn string) string {
}
return mn[:i+1]
}
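For illustration, a self-contained sketch of the grouping rule implemented by MethodGroupFromName above: a method name maps to the group formed by its prefix up to (but not including) the second upper-case rune. The guard for names without a second upper-case rune is an assumption about the lines hidden between the hunks:

// Hypothetical standalone demo of the method-grouping rule (not part of this diff).
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func groupOf(mn string) string {
	// Index of the second upper-case rune, searched from position 1 onwards.
	i := strings.IndexFunc(mn[1:], unicode.IsUpper)
	if i < 0 {
		return "" // assumed fallback for single-word names
	}
	return mn[:i+1]
}

func main() {
	fmt.Println(groupOf("StateWaitMsg"))  // State
	fmt.Println(groupOf("MpoolPush"))     // Mpool
	fmt.Println(groupOf("ChainGetBlock")) // Chain
}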
func main() {
comments, groupComments := parseApiASTInfo(os.Args[1], os.Args[2])
groups := make(map[string]*MethodGroup)
var t reflect.Type
var permStruct, commonPermStruct reflect.Type
switch os.Args[2] {
case "FullNode":
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
case "StorageMiner":
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = reflect.TypeOf(apistruct.StorageMinerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.CommonStruct{}.Internal)
case "WorkerAPI":
t = reflect.TypeOf(new(struct{ api.WorkerAPI })).Elem()
permStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(apistruct.WorkerStruct{}.Internal)
default:
panic("unknown type")
}
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
groupName := methodGroupFromName(m.Name)
g, ok := groups[groupName]
if !ok {
g = new(MethodGroup)
g.Header = groupComments[groupName]
g.GroupName = groupName
groups[groupName] = g
}
var args []interface{}
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, exampleValue(m.Name, inp, nil))
}
v, err := json.MarshalIndent(args, "", " ")
if err != nil {
panic(err)
}
outv := exampleValue(m.Name, ft.Out(0), nil)
ov, err := json.MarshalIndent(outv, "", " ")
if err != nil {
panic(err)
}
g.Methods = append(g.Methods, &Method{
Name: m.Name,
Comment: comments[m.Name],
InputExample: string(v),
ResponseExample: string(ov),
})
}
var groupslice []*MethodGroup
for _, g := range groups {
groupslice = append(groupslice, g)
}
sort.Slice(groupslice, func(i, j int) bool {
return groupslice[i].GroupName < groupslice[j].GroupName
})
fmt.Printf("# Groups\n")
for _, g := range groupslice {
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
for _, method := range g.Methods {
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
}
}
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName)
fmt.Printf("%s\n\n", g.Header)
sort.Slice(g.Methods, func(i, j int) bool {
return g.Methods[i].Name < g.Methods[j].Name
})
for _, m := range g.Methods {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
meth, ok := permStruct.FieldByName(m.Name)
if !ok {
meth, ok = commonPermStruct.FieldByName(m.Name)
if !ok {
panic("no perms for method: " + m.Name)
}
}
perms := meth.Tag.Get("perm")
fmt.Printf("Perms: %s\n\n", perms)
if strings.Count(m.InputExample, "\n") > 0 {
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
} else {
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
}
if strings.Count(m.ResponseExample, "\n") > 0 {
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
} else {
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
}
}
}
}

View File

@ -6,9 +6,12 @@ package mocks
import (
context "context"
reflect "reflect"
address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
auth "github.com/filecoin-project/go-jsonrpc/auth"
multistore "github.com/filecoin-project/go-multistore"
@ -18,6 +21,7 @@ import (
dline "github.com/filecoin-project/go-state-types/dline"
network "github.com/filecoin-project/go-state-types/network"
api "github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
@ -31,7 +35,6 @@ import (
network0 "github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
reflect "reflect"
)
// MockFullNode is a mock of FullNode interface
@ -444,6 +447,20 @@ func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
}
// ClientCancelRetrievalDeal mocks base method
func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal
func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
}
// ClientDataTransferUpdates mocks base method
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
m.ctrl.T.Helper()
@ -783,6 +800,21 @@ func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1)
}
// Discover mocks base method
func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Discover", arg0)
ret0, _ := ret[0].(apitypes.OpenRPCDocument)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Discover indicates an expected call of Discover
func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0)
}
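A short, hypothetical test sketch showing how the generated mock above would typically be exercised with gomock; NewMockFullNode is the standard mockgen constructor, and the testing/context/gomock/apitypes imports of a _test file are assumed, since they fall outside this hunk:

// Hypothetical usage of the generated mock in a test (not part of this diff).
func TestDiscoverMockSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	full := NewMockFullNode(ctrl)
	// Stub Discover to return a minimal OpenRPC document.
	full.EXPECT().
		Discover(gomock.Any()).
		Return(apitypes.OpenRPCDocument{"openrpc": "1.2.6"}, nil)

	doc, err := full.Discover(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	if doc["openrpc"] != "1.2.6" {
		t.Fatalf("unexpected document: %v", doc)
	}
}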
// GasEstimateFeeCap mocks base method
func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) {
m.ctrl.T.Helper()
@ -2063,21 +2095,6 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
}
// StateGetReceipt mocks base method
func (m *MockFullNode) StateGetReceipt(arg0 context.Context, arg1 cid.Cid, arg2 types.TipSetKey) (*types.MessageReceipt, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetReceipt", arg0, arg1, arg2)
ret0, _ := ret[0].(*types.MessageReceipt)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetReceipt indicates an expected call of StateGetReceipt
func (mr *MockFullNodeMockRecorder) StateGetReceipt(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetReceipt", reflect.TypeOf((*MockFullNode)(nil).StateGetReceipt), arg0, arg1, arg2)
}
// StateListActors mocks base method
func (m *MockFullNode) StateListActors(arg0 context.Context, arg1 types.TipSetKey) ([]address.Address, error) {
m.ctrl.T.Helper()
@ -2469,33 +2486,18 @@ func (mr *MockFullNodeMockRecorder) StateReplay(arg0, arg1, arg2 interface{}) *g
}
// StateSearchMsg mocks base method
func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 cid.Cid) (*api.MsgLookup, error) {
func (m *MockFullNode) StateSearchMsg(arg0 context.Context, arg1 types.TipSetKey, arg2 cid.Cid, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1)
ret := m.ctrl.Call(m, "StateSearchMsg", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*api.MsgLookup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateSearchMsg indicates an expected call of StateSearchMsg
func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1 interface{}) *gomock.Call {
func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1)
}
// StateSearchMsgLimited mocks base method
func (m *MockFullNode) StateSearchMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 abi.ChainEpoch) (*api.MsgLookup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSearchMsgLimited", arg0, arg1, arg2)
ret0, _ := ret[0].(*api.MsgLookup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateSearchMsgLimited indicates an expected call of StateSearchMsgLimited
func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsgLimited), arg0, arg1, arg2)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSearchMsg", reflect.TypeOf((*MockFullNode)(nil).StateSearchMsg), arg0, arg1, arg2, arg3, arg4)
}
// StateSectorExpiration mocks base method
@ -2619,33 +2621,18 @@ func (mr *MockFullNodeMockRecorder) StateVerifierStatus(arg0, arg1, arg2 interfa
}
// StateWaitMsg mocks base method
func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64) (*api.MsgLookup, error) {
func (m *MockFullNode) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2)
ret := m.ctrl.Call(m, "StateWaitMsg", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(*api.MsgLookup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateWaitMsg indicates an expected call of StateWaitMsg
func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2 interface{}) *gomock.Call {
func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2)
}
// StateWaitMsgLimited mocks base method
func (m *MockFullNode) StateWaitMsgLimited(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch) (*api.MsgLookup, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateWaitMsgLimited", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*api.MsgLookup)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateWaitMsgLimited indicates an expected call of StateWaitMsgLimited
func (mr *MockFullNodeMockRecorder) StateWaitMsgLimited(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsgLimited", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsgLimited), arg0, arg1, arg2, arg3)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4)
}
// SyncCheckBad mocks base method

View File

@ -1,8 +1,7 @@
package apistruct
package api
import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
)
const (
@ -17,27 +16,27 @@ const (
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}
func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
var out StorageMinerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}
func PermissionedFullAPI(a api.FullNode) api.FullNode {
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}
func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
func PermissionedWorkerAPI(a Worker) Worker {
var out WorkerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
return &out
}
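A hedged sketch of how the permissioned wrappers above gate calls by the permissions carried in the request context. auth.WithPerm (and the context/fmt/go-address imports) are assumptions about the go-jsonrpc auth package and the surrounding code, not part of this diff:

// Hypothetical caller-side sketch (not part of this diff): wrap a FullNode so every
// call is checked against the permissions attached to the context.
func exampleGatedCalls(ctx context.Context, node FullNode) error {
	gated := PermissionedFullAPI(node)

	// A context carrying only the read permission: read-tagged methods succeed...
	readCtx := auth.WithPerm(ctx, []auth.Permission{PermRead}) // assumed helper
	if _, err := gated.ChainHead(readCtx); err != nil {
		return err
	}

	// ...while admin-tagged methods (e.g. WalletExport, perm:admin) are rejected.
	if _, err := gated.WalletExport(readCtx, address.Undef); err == nil {
		return fmt.Errorf("expected a permission error")
	}
	return nil
}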
func PermissionedWalletAPI(a api.WalletAPI) api.WalletAPI {
func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
return &out

api/proxy_gen.go (new file, 3581 lines; diff suppressed because it is too large)

View File

@ -7,6 +7,8 @@ import (
"testing"
"time"
"github.com/filecoin-project/lotus/api"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
@ -240,7 +242,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
}, nil)
require.NoError(t, err)
r, err := client.StateWaitMsg(ctx, m.Cid(), 2)
r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
}
@ -323,7 +325,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
fmt.Println("sent termination message:", smsg.Cid())
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2)
r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)

View File

@ -23,9 +23,11 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
@ -183,6 +185,71 @@ func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duratio
}
}
func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(4)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 1,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 2,
AlwaysKeepUnsealedCopy: true,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}
// Create a connected client and a miner node
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
// Starts a deal and waits until it is sealed
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed)
require.NoError(t, err)
dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false)
}
// Run maxDealsPerMsg+1 deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= int(maxDealsPerMsg+1); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
require.GreaterOrEqual(t, len(sl), 4)
require.LessOrEqual(t, len(sl), 5)
}
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()

View File

@ -235,7 +235,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
if err != nil {
t.Fatal(err)
}
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3)
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
@ -287,7 +287,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentRec
t.Fatal(err)
}
_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1)
_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1, api.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
@ -299,7 +299,7 @@ func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode,
defer cancel()
fmt.Println("Waiting for", desc)
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1)
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1, api.LookbackNoLimit, true)
if err != nil {
fmt.Println("Error waiting for", desc, err)
t.Fatal(err)

View File

@ -19,7 +19,8 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
@ -39,7 +40,7 @@ func init() {
type StorageBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestStorageNode
type TestNode struct {
api.FullNode
v1api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
@ -48,7 +49,7 @@ type TestNode struct {
}
type TestStorageNode struct {
api.StorageMiner
lapi.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
@ -170,9 +171,9 @@ var MineNext = miner.MineReq{
}
func (ts *testSuite) testVersion(t *testing.T) {
api.RunningNodeType = api.NodeFull
lapi.RunningNodeType = lapi.NodeFull
t.Cleanup(func() {
api.RunningNodeType = api.NodeUnknown
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
@ -214,7 +215,7 @@ func (ts *testSuite) testSearchMsg(t *testing.T) {
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1)
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
@ -222,7 +223,7 @@ func (ts *testSuite) testSearchMsg(t *testing.T) {
t.Fatal("did not successfully send message")
}
searchRes, err := api.StateSearchMsg(ctx, sm.Cid())
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}

View File

@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
@ -28,7 +29,7 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}

View File

@ -766,7 +766,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration)
require.NoError(t, err)
fmt.Println("waiting dispute")
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
}
@ -807,7 +807,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration)
sm, err := client.MpoolPushMessage(ctx, msg, nil)
require.NoError(t, err)
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
}
@ -886,7 +886,7 @@ func submitBadProof(
return err
}
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {
return err
}

View File

@ -61,6 +61,7 @@ type DataTransferChannel struct {
Message string
OtherPeer peer.ID
Transferred uint64
Stages *datatransfer.ChannelStages
}
// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id

api/types/actors.go (new file, 5 lines)

@ -0,0 +1,5 @@
package apitypes
import "github.com/filecoin-project/go-state-types/network"
type NetworkVersion = network.Version

api/types/openrpc.go (new file, 3 lines)

@ -0,0 +1,3 @@
package apitypes
type OpenRPCDocument map[string]interface{}

api/v0api/full.go (new file, 701 lines)

@ -0,0 +1,701 @@
package v0api
import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-multistore"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
// you'll need to make sure they are also present on the V1 (Unstable) API
//
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
// by the V1 api
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the
// blockchain, but that do not require any form of state computation.
// ChainNotify returns a channel with chain head updates.
// First message is guaranteed to be of len == 1, and type == 'current'.
ChainNotify(context.Context) (<-chan []*api.HeadChange, error) //perm:read
// ChainHead returns the current head of the chain.
ChainHead(context.Context) (*types.TipSet, error) //perm:read
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetBlock returns the block specified by the given CID.
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetBlockMessages returns messages stored in the specified block.
//
// Note: If there are multiple blocks in a tipset, it's likely that some
// messages will be duplicated. It's also possible for blocks in a tipset to have
// different messages from the same sender at the same nonce. When that happens,
// only the first message (in a block with lowest ticket) will be considered
// for execution
//
// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
//
// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
// Use ChainGetParentMessages, which will perform correct message deduplication
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) //perm:read
// ChainGetParentReceipts returns receipts for messages in parent tipset of
// the specified block. The receipts in the list returned is one-to-one with the
// messages returned by a call to ChainGetParentMessages with the same blockCid.
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
// ChainGetParentMessages returns messages stored in parent tipset of the
// specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned.
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes.
ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
// ChainDeleteObj deletes node referenced by the given CID
ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
// ChainHasObj checks if a given CID exists in the chain blockstore.
ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
// ChainStatObj returns statistics about the graph referenced by 'obj'.
// If 'base' is also specified, then the returned stat will be a diff
// between the two objects.
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) //perm:read
// ChainSetHead forcefully sets current chain head. Use with caution.
ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
// ChainGetGenesis returns the genesis tipset.
ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
// ChainTipSetWeight computes weight for the specified tipset.
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) //perm:read
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore.
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
//```
// to
// ^
// from tAA
// ^ ^
// tBA tAB
// ^---*--^
// ^
// tRR
//```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read
// ChainExport returns a stream of bytes with CAR dump of chain data.
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
// If oldmsgskip is set, messages from before the requested roots are also not included.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
// GasEstimateFeeCap estimates gas fee cap
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateGasLimit estimates gas used by the message and returns it.
// It fails if message fails to execute.
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateMessageGas estimates gas values for unset message gas fields
GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
// MethodGroup: Sync
// The Sync method group contains methods for interacting with and
// observing the lotus sync service.
// SyncState returns the current status of the lotus sync system.
SyncState(context.Context) (*api.SyncState, error) //perm:read
// SyncSubmitBlock can be used to submit a newly created block to the
// network through this node.
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution.
SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkBad unmarks a block as bad, making it possible for it to be validated and synced again.
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
SyncUnmarkAllBad(ctx context.Context) error //perm:admin
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
// SyncValidateTipset indicates whether the provided tipset is valid or not
SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages.
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
// MpoolSelect returns a list of pending messages for inclusion in the next block
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) //perm:sign
// MpoolBatchPush batch pushes a signed message to mempool.
MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources.
MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushMessage batch pushes an unsigned message to mempool.
MpoolBatchPushMessage(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
MpoolSub(context.Context) (<-chan api.MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool
MpoolClear(context.Context, bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
// MethodGroup: Miner
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) //perm:read
MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) //perm:write
// // UX ?
// MethodGroup: Wallet
// WalletNew creates a new address in the wallet with the given sigType.
// Available key types: bls, secp256k1, secp256k1-ledger
// Specifying the key type numerically (1 - secp256k1, 2 - BLS) is deprecated.
WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
// WalletHas indicates whether the given address is in the wallet.
WalletHas(context.Context, address.Address) (bool, error) //perm:write
// WalletList lists all the addresses in the wallet.
WalletList(context.Context) ([]address.Address, error) //perm:write
// WalletBalance returns the balance of the given address at the current head of the chain.
WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
// WalletSign signs the given bytes using the given address.
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
// WalletSignMessage signs the given message using the given address.
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error //perm:admin
// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
// Other
// MethodGroup: Client
// The Client methods all have to do with interacting with the storage and
// retrieval markets as a client
// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientDealPieceCID calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
// ClientUnimport removes references to the specified file from filestore
//ClientUnimport(path string)
// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write
//ClientListAsks() []Ask
// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
// A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.
// StateCall runs the given message and returns its result without any persisted changes.
//
// StateCall applies the message to the tipset's parent state. The
// message is not applied on-top-of the messages in the passed-in
// tipset.
StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
//
// If a tipset key is provided, and a replacing message is found on chain,
// the method will return an error saying that the message wasn't found
//
// If no tipset key is provided, the appropriate tipset is looked up, and if
// the message was gas-repriced, the on-chain message will be replayed - in
// that case the returned InvocResult.MsgCid will not match the Cid param
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID, one or more different gas
// values, and a different signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) //perm:read
// StateGetActor returns the indicated actor's nonce and balance.
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
// StateReadState returns the indicated actor's state.
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) //perm:read
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
// StateNetworkName returns the name of the network the node is synced to
StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
// StateMinerPower returns the power of the indicated miner
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) //perm:read
// StateMinerInfo returns info about the indicated miner
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
// StateMinerDeadlines returns all the proving deadlines for the given miner
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) //perm:read
// StateMinerPartitions returns all partitions in the specified deadline
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) //perm:read
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*api.Fault, error) //perm:read
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerSectorAllocated checks if a sector is allocated
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
// StateSectorExpiration returns epoch at which given sector will expire
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
// StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and signature, and possibly
// different gas values, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) //perm:read
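A minimal sketch of the check described above, assuming a connected FullNode client `node`, a context `ctx`, and a message CID `mcid` (all hypothetical names):
lookup, err := node.StateSearchMsg(ctx, mcid)
if err != nil {
	return err
}
if lookup == nil {
	return nil // message has not been executed on chain yet
}
if !lookup.Message.Equals(mcid) {
	// a gas-repriced replacing message was executed instead of the requested one
	return xerrors.Errorf("message %s was replaced on chain by %s", mcid, lookup.Message)
}
receipt := lookup.Receipt // now safe to treat as the receipt of mcid
_ = receipt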
// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and signature, and possibly
// different gas values, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
// message arrives on chain, and gets to the indicated confidence depth.
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and signature, and possibly
// different gas values, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) //perm:read
// StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
// If not found, it blocks until the message arrives on chain, and gets to the
// indicated confidence depth.
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and signature, and possibly
// different gas values, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateListActors returns the addresses of every actor in the state
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) //perm:read
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) //perm:read
// StateMarketDeals returns information about every deal in the Storage Market
StateMarketDeals(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) //perm:read
// StateMarketStorageDeal returns information about the indicated deal
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read
// StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
// StateGetReceipt returns the message receipt for the given message or for a
// matching gas-repriced replacing message
//
// NOTE: If the requested message was replaced, this method will return the receipt
// for the replacing message - if the caller needs the receipt for exactly the
// requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message
// matches the requested CID
//
// DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) //perm:read
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) //perm:read
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
//
// When called, StateCompute will:
// - Load the provided tipset, or use the current chain head if not provided
// - Compute the tipset state of the provided tipset on top of the parent state
// - (note that this step runs before vmheight is applied to the execution)
// - Execute state upgrade if any were scheduled at the epoch, or in null
// blocks preceding the tipset
// - Call the cron actor on null blocks preceding the tipset
// - For each block in the tipset
// - Apply the block's messages in the specified order
// - Award block reward by calling the reward actor
// - Call the cron actor for the current epoch
// - If the specified vmheight is higher than the current epoch, apply any
// needed state upgrades to the state
// - Apply the specified messages to the state
//
// The vmheight parameter sets VM execution epoch, and can be used to simulate
// message execution in different network versions. If the specified vmheight
// epoch is higher than the epoch of the specified tipset, any state upgrades
// until the vmheight will be executed on the state before applying messages
// specified by the user.
//
// Note that the initial tipset state computation is not affected by the
// vmheight parameter - only the messages in the `apply` set are
//
// If the caller wants to simply compute the state, vmheight should be set to
// the epoch of the specified tipset.
//
// Messages in the `apply` parameter must have the correct nonces, and gas
// values set.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) //perm:read
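A minimal usage sketch, assuming a FullNode client `node` and a tipset `ts`: to simply compute the state of `ts` without simulating a different network version, pass the tipset's own epoch as vmheight and an empty apply set.
out, err := node.StateCompute(ctx, ts.Height(), nil, ts.Key())
if err != nil {
	return err
}
_ = out.Root // the computed state root for ts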
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) //perm:read
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
// This is not used anywhere in the protocol itself, and is only for external consumption.
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
// This is the value reported by the runtime interface to actors code.
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) //perm:read
// StateNetworkVersion returns the network version at the given tipset
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetVestingSchedule returns the vesting details of a given multisig.
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) //perm:read
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: <multisig address>, <start tipset>, <end tipset>
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetPending returns pending transactions for the given multisig
// wallet. Once pending transactions are fully approved, they will no longer
// appear here.
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
// <initial balance>, <sender address of the create msg>, <gas price>
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
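An illustrative call, assuming hypothetical signer addresses a1, a2, a3 and a `sender` address paying for the create message: a 2-of-3 multisig with no vesting period and a 10 FIL initial balance.
c, err := node.MsigCreate(ctx, 2, []address.Address{a1, a2, a3},
	abi.ChainEpoch(0), types.FromFil(10), sender, types.NewInt(0))
if err != nil {
	return err
}
_ = c // CID of the create message; wait on it with StateWaitMsg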
// MsigPropose proposes a multisig message
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: <multisig address>, <proposed transaction ID> <signer address>
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the
// proposal. This method of approval can be used to ensure you approve
// exactly the transaction you intend to.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
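A sketch of an approval that re-supplies the proposal parameters, using hypothetical addresses `msig`, `proposer`, `recipient`, `approver` and proposal ID 2; the approval only matches a proposal with these exact parameters (here a plain 1 FIL send, method 0, no params).
c, err := node.MsigApproveTxnHash(ctx, msig, 2, proposer, recipient,
	types.FromFil(1), approver, 0, nil)
if err != nil {
	return err
}
_ = c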
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one
// along with the address removal.
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign
// MarketAddBalance adds funds to the market actor
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketGetReserved gets the amount of funds that are currently reserved for the address
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
// MarketReserveFunds reserves funds for a deal
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
// MarketWithdraw withdraws unlocked funds from the market actor
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) //perm:sign
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
PaychList(context.Context) ([]address.Address, error) //perm:read
PaychStatus(context.Context, address.Address) (*api.PaychStatus, error) //perm:read
PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) //perm:sign
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*api.VoucherCreateResult, error) //perm:sign
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
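For the CreateBackup method above, a minimal sketch with a hypothetical base path: the daemon must have been started with LOTUS_BACKUP_BASE_PATH=/var/lib/lotus-backups, and the target file must sit under that base path.
if err := node.CreateBackup(ctx, "/var/lib/lotus-backups/backup.cbor"); err != nil {
	return err
}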

api/v0api/gateway.go

@ -0,0 +1,68 @@
package v0api
import (
"context"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
)
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
// you'll need to make sure they are also present on the V1 (Unstable) API
//
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
// by the V1 api
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Gateway interface {
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error)
}
var _ Gateway = *new(FullNode)

api/v0api/latest.go

@ -0,0 +1,25 @@
package v0api
import (
"github.com/filecoin-project/lotus/api"
)
type Common = api.Common
type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct
type Worker = api.Worker
type WorkerStruct = api.WorkerStruct
type Wallet = api.Wallet
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
return api.PermissionedStorMinerAPI(a)
}
func PermissionedWorkerAPI(a Worker) Worker {
return api.PermissionedWorkerAPI(a)
}

api/v0api/permissioned.go

@ -0,0 +1,13 @@
package v0api
import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
)
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}

api/v0api/proxy_gen.go

File diff suppressed because it is too large

File diff suppressed because it is too large

api/v0api/v1_wrapper.go

@ -0,0 +1,60 @@
package v0api
import (
"context"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
)
type WrapperV1Full struct {
v1api.FullNode
}
func (w *WrapperV1Full) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, api.LookbackNoLimit, true)
}
func (w *WrapperV1Full) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true)
}
func (w *WrapperV1Full) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
return w.FullNode.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true)
}
func (w *WrapperV1Full) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) {
return w.FullNode.StateWaitMsg(ctx, msg, confidence, limit, true)
}
func (w *WrapperV1Full) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) {
ml, err := w.FullNode.StateSearchMsg(ctx, from, msg, api.LookbackNoLimit, true)
if err != nil {
return nil, err
}
if ml == nil {
return nil, nil
}
return &ml.Receipt, nil
}
func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) {
ver, err := w.FullNode.Version(ctx)
if err != nil {
return api.APIVersion{}, err
}
ver.APIVersion = api.FullAPIVersion0
return ver, nil
}
var _ FullNode = &WrapperV1Full{}

api/v1api/latest.go

@ -0,0 +1,12 @@
package v1api
import (
"github.com/filecoin-project/lotus/api"
)
type FullNode = api.FullNode
type FullNodeStruct = api.FullNodeStruct
func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a)
}


@ -42,11 +42,11 @@ var RunningNodeType NodeType
func VersionForType(nodeType NodeType) (Version, error) {
switch nodeType {
case NodeFull:
return FullAPIVersion, nil
return FullAPIVersion1, nil
case NodeMiner:
return MinerAPIVersion, nil
return MinerAPIVersion0, nil
case NodeWorker:
return WorkerAPIVersion, nil
return WorkerAPIVersion0, nil
default:
return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
}
@ -54,9 +54,11 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion = newVer(1, 2, 0)
MinerAPIVersion = newVer(1, 0, 1)
WorkerAPIVersion = newVer(1, 0, 0)
FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0)
MinerAPIVersion0 = newVer(1, 0, 1)
WorkerAPIVersion0 = newVer(1, 0, 0)
)
//nolint:varcheck,deadcode

api/wrap.go

@ -0,0 +1,32 @@
package api
import (
"reflect"
)
// Wrap adapts partial api impl to another version
// proxyT is the proxy type used as input in wrapperT
// Usage: Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), eventsApi).(EventAPI)
func Wrap(proxyT, wrapperT, impl interface{}) interface{} {
proxy := reflect.New(reflect.TypeOf(proxyT).Elem())
proxyMethods := proxy.Elem().FieldByName("Internal")
ri := reflect.ValueOf(impl)
for i := 0; i < ri.NumMethod(); i++ {
mt := ri.Type().Method(i)
if proxyMethods.FieldByName(mt.Name).Kind() == reflect.Invalid {
continue
}
fn := ri.Method(i)
of := proxyMethods.FieldByName(mt.Name)
proxyMethods.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
return fn.Call(args)
}))
}
wp := reflect.New(reflect.TypeOf(wrapperT).Elem())
wp.Elem().Field(0).Set(proxy)
return wp.Interface()
}


@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"runtime"
"sync/atomic"
"github.com/dgraph-io/badger/v2"
@ -84,11 +85,11 @@ const (
// operation calls after Close() has returned, but it may not happen for
// operations in progress. Those are likely to fail with a different error.
type Blockstore struct {
DB *badger.DB
// state is guarded by atomic.
// state is accessed atomically
state int64
DB *badger.DB
prefixing bool
prefix []byte
prefixLen int
@ -150,6 +151,20 @@ func (b *Blockstore) CollectGarbage() error {
return err
}
// Compact runs a synchronous compaction
func (b *Blockstore) Compact() error {
if atomic.LoadInt64(&b.state) != stateOpen {
return ErrBlockstoreClosed
}
nworkers := runtime.NumCPU() / 2
if nworkers < 2 {
nworkers = 2
}
return b.DB.Flatten(nworkers)
}
// View implements blockstore.Viewer, which leverages zero-copy read-only
// access to values.
func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
@ -288,9 +303,6 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error {
return ErrBlockstoreClosed
}
batch := b.DB.NewWriteBatch()
defer batch.Cancel()
// toReturn tracks the byte slices to return to the pool, if we're using key
// prefixing. we can't return each slice to the pool after each Set, because
// badger holds on to the slice.
@ -304,6 +316,9 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error {
}()
}
batch := b.DB.NewWriteBatch()
defer batch.Cancel()
for _, block := range blocks {
k, pooled := b.PooledStorageKey(block.Cid())
if pooled {
@ -342,9 +357,6 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
return ErrBlockstoreClosed
}
batch := b.DB.NewWriteBatch()
defer batch.Cancel()
// toReturn tracks the byte slices to return to the pool, if we're using key
// prefixing. we can't return each slice to the pool after each Set, because
// badger holds on to the slice.
@ -358,6 +370,9 @@ func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
}()
}
batch := b.DB.NewWriteBatch()
defer batch.Cancel()
for _, cid := range cids {
k, pooled := b.PooledStorageKey(cid)
if pooled {


@ -1,25 +0,0 @@
package blockstore
import (
"context"
blockstore "github.com/ipfs/go-ipfs-blockstore"
)
type CacheOpts = blockstore.CacheOpts
func DefaultCacheOpts() CacheOpts {
return CacheOpts{
HasBloomFilterSize: 0,
HasBloomFilterHashes: 0,
HasARCCacheSize: 512 << 10,
}
}
func CachedBlockstore(ctx context.Context, bs Blockstore, opts CacheOpts) (Blockstore, error) {
cached, err := blockstore.CachedBlockstore(ctx, bs, opts)
if err != nil {
return nil, err
}
return WrapIDStore(cached), nil
}


@ -798,15 +798,26 @@ func (s *SplitStore) purgeTracking(cids []cid.Cid) error {
}
func (s *SplitStore) gcHotstore() {
if compact, ok := s.hot.(interface{ Compact() error }); ok {
log.Infof("compacting hotstore")
startCompact := time.Now()
err := compact.Compact()
if err != nil {
log.Warnf("error compacting hotstore: %s", err)
return
}
log.Infow("hotstore compaction done", "took", time.Since(startCompact))
}
if gc, ok := s.hot.(interface{ CollectGarbage() error }); ok {
log.Infof("garbage collecting hotstore")
startGC := time.Now()
err := gc.CollectGarbage()
if err != nil {
log.Warnf("error garbage collecting hotstore: %s", err)
} else {
log.Infow("garbage collection done", "took", time.Since(startGC))
return
}
log.Infow("hotstore garbage collection done", "took", time.Since(startGC))
}
}


@ -103,13 +103,20 @@ func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error {
}
func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error {
// The underlying blockstore is always a "mem" blockstore so there's no difference,
// from a performance perspective, between view & get. So we call Get to avoid
// calling an arbitrary callback while holding a lock.
t.mu.RLock()
defer t.mu.RUnlock()
err := t.active.View(k, callback)
block, err := t.active.Get(k)
if err == ErrNotFound {
err = t.inactive.View(k, callback)
block, err = t.inactive.Get(k)
}
return err
t.mu.RUnlock()
if err != nil {
return err
}
return callback(block.RawData())
}
func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) {


@ -7,7 +7,7 @@
/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C
/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt

build/openrpc.go

@ -0,0 +1,43 @@
package build
import (
"bytes"
"compress/gzip"
"encoding/json"
rice "github.com/GeertJohan/go.rice"
apitypes "github.com/filecoin-project/lotus/api/types"
)
func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
zr, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
log.Fatal(err)
}
m := apitypes.OpenRPCDocument{}
err = json.NewDecoder(zr).Decode(&m)
if err != nil {
log.Fatal(err)
}
err = zr.Close()
if err != nil {
log.Fatal(err)
}
return m
}
func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("full.json.gz")
return mustReadGzippedOpenRPCDocument(data)
}
func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("miner.json.gz")
return mustReadGzippedOpenRPCDocument(data)
}
func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("worker.json.gz")
return mustReadGzippedOpenRPCDocument(data)
}

build/openrpc/full.json.gz (binary file, not shown)

build/openrpc/miner.json.gz (binary file, not shown)

Binary file not shown.

build/openrpc_test.go

@ -0,0 +1,23 @@
package build
import (
"testing"
apitypes "github.com/filecoin-project/lotus/api/types"
)
func TestOpenRPCDiscoverJSON_Version(t *testing.T) {
// openRPCDocVersion is the current OpenRPC version of the API docs.
openRPCDocVersion := "1.2.6"
for i, docFn := range []func() apitypes.OpenRPCDocument{
OpenRPCDiscoverJSON_Full,
OpenRPCDiscoverJSON_Miner,
OpenRPCDiscoverJSON_Worker,
} {
doc := docFn()
if got, ok := doc["openrpc"]; !ok || got != openRPCDocVersion {
t.Fatalf("case: %d, want: %s, got: %v, doc: %v", i, openRPCDocVersion, got, doc)
}
}
}


@ -6,6 +6,8 @@ import (
"os"
"strconv"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
@ -45,6 +47,7 @@ func init() {
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
hs, found := os.LookupEnv(ev)
@ -93,3 +96,5 @@ const SlashablePowerDelay = 20
const InteractivePoRepConfidence = 6
const BootstrapPeerThreshold = 1
var WhitelistedBlock = cid.Undef


@ -7,6 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/ipfs/go-cid"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
@ -52,3 +53,5 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
var WhitelistedBlock = cid.Undef


@ -7,6 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/ipfs/go-cid"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
@ -64,3 +65,5 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
var WhitelistedBlock = cid.Undef


@ -88,3 +88,6 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
// we skip checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")


@ -5,6 +5,7 @@ package build
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/ipfs/go-cid"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
)
@ -72,3 +73,5 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
var WhitelistedBlock = cid.Undef


@ -2,6 +2,7 @@ package build
import (
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/protocol"
@ -28,3 +29,12 @@ func MustParseAddress(addr string) address.Address {
return ret
}
func MustParseCid(c string) cid.Cid {
ret, err := cid.Decode(c)
if err != nil {
panic(err)
}
return ret
}


@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
// Consensus / Network
const AllowableClockDriftSecs = uint64(1)
const NewestNetworkVersion = network.Version9
const NewestNetworkVersion = network.Version11
const ActorUpgradeNetworkVersion = network.Version4
// Epochs
@ -118,5 +118,5 @@ const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5
// Actor consts
// TODO: Pull from actors when its made not private
var MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay)
// TODO: pieceSize unused from actors
var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)


@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -72,8 +73,8 @@ var (
}()
// Actor consts
// TODO: Pull from actors when its made not private
MinDealDuration = abi.ChainEpoch(180 * builtin2.EpochsInDay)
// TODO: pieceSize unused from actors
MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)
PackingEfficiencyNum int64 = 4
PackingEfficiencyDenom int64 = 5
@ -100,12 +101,13 @@ var (
0: DrandMainnet,
}
NewestNetworkVersion = network.Version9
NewestNetworkVersion = network.Version11
ActorUpgradeNetworkVersion = network.Version4
Devnet = true
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
WhitelistedBlock = cid.Undef
BootstrappersFile = ""
GenesisFile = ""
)


@ -29,7 +29,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
const BuildVersion = "1.8.0"
const BuildVersion = "1.11.0-dev"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit


@ -54,7 +54,7 @@ type PoStProof = proof0.PoStProof
type FilterEstimate = smoothing0.FilterEstimate
func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
return (FilterEstimate)(v0)
return (FilterEstimate)(v0) //nolint:unconvert
}
// Doesn't change between actors v0, v2, and v3.


@ -67,6 +67,7 @@ type State interface {
VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error)
NextID() (abi.DealID, error)
}
type BalanceTable interface {


@ -105,6 +105,10 @@ func (s *state0) VerifyDealsForActivation(
return market0.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
}
func (s *state0) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable0 struct {
*adt0.BalanceTable
}


@ -106,6 +106,10 @@ func (s *state2) VerifyDealsForActivation(
return w, vw, err
}
func (s *state2) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable2 struct {
*adt2.BalanceTable
}


@ -106,6 +106,10 @@ func (s *state3) VerifyDealsForActivation(
return w, vw, err
}
func (s *state3) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable3 struct {
*adt3.BalanceTable
}


@ -106,6 +106,10 @@ func (s *state4) VerifyDealsForActivation(
return w, vw, err
}
func (s *state4) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable4 struct {
*adt4.BalanceTable
}


@ -68,7 +68,7 @@ func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
return cb(txid, (Transaction)(out))
return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}


@ -67,7 +67,7 @@ func (s *state2) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
return cb(txid, (Transaction)(out))
return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}


@ -69,7 +69,7 @@ func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) err
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
return cb(txid, (Transaction)(out))
return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}


@ -30,9 +30,10 @@ import (
)
const (
ChainFinality = miner4.ChainFinality
SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych4.SettleDelay
ChainFinality = miner4.ChainFinality
SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych4.SettleDelay
MaxPreCommitRandomnessLookback = builtin4.EpochsInDay + SealRandomnessLookback
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
@ -157,6 +158,10 @@ func DealProviderCollateralBounds(
}
}
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
return market2.DealDurationBounds(pieceSize)
}
// Sets the challenge window and scales the proving period to match (such that
// there are always 48 challenge windows in a proving period).
func SetWPoStChallengeWindow(period abi.ChainEpoch) {


@ -4,7 +4,7 @@ import (
"context"
"github.com/filecoin-project/go-state-types/abi"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"


@ -16,7 +16,7 @@ import (
"go.uber.org/zap/zapcore"
"golang.org/x/xerrors"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/filecoin-project/go-state-types/abi"


@ -33,19 +33,19 @@ type heightHandler struct {
revert RevertHandler
}
type eventAPI interface {
type EventAPI interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
ChainHead(context.Context) (*types.TipSet, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg
}
type Events struct {
api eventAPI
api EventAPI
tsc *tipSetCache
lk sync.Mutex
@ -55,11 +55,11 @@ type Events struct {
heightEvents
*hcEvents
observers []TipSetObserver
}
func NewEvents(ctx context.Context, api eventAPI) *Events {
gcConfidence := 2 * build.ForkLengthThreshold
func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) *Events {
tsc := newTSCache(gcConfidence, api)
e := &Events{
@ -77,8 +77,9 @@ func NewEvents(ctx context.Context, api eventAPI) *Events {
htHeights: map[abi.ChainEpoch][]uint64{},
},
hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
ready: make(chan struct{}),
hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
ready: make(chan struct{}),
observers: []TipSetObserver{},
}
go e.listenHeadChanges(ctx)
@ -92,6 +93,11 @@ func NewEvents(ctx context.Context, api eventAPI) *Events {
return e
}
func NewEvents(ctx context.Context, api EventAPI) *Events {
gcConfidence := 2 * build.ForkLengthThreshold
return NewEventsWithConfidence(ctx, api, gcConfidence)
}
func (e *Events) listenHeadChanges(ctx context.Context) {
for {
if err := e.listenHeadChangesOnce(ctx); err != nil {
@ -164,7 +170,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
}
}
if err := e.headChange(rev, app); err != nil {
if err := e.headChange(ctx, rev, app); err != nil {
log.Warnf("headChange failed: %s", err)
}
@ -177,7 +183,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
return nil
}
func (e *Events) headChange(rev, app []*types.TipSet) error {
func (e *Events) headChange(ctx context.Context, rev, app []*types.TipSet) error {
if len(app) == 0 {
return xerrors.New("events.headChange expected at least one applied tipset")
}
@ -189,5 +195,39 @@ func (e *Events) headChange(rev, app []*types.TipSet) error {
return err
}
if err := e.observeChanges(ctx, rev, app); err != nil {
return err
}
return e.processHeadChangeEvent(rev, app)
}
// A TipSetObserver receives notifications of tipsets
type TipSetObserver interface {
Apply(ctx context.Context, ts *types.TipSet) error
Revert(ctx context.Context, ts *types.TipSet) error
}
// TODO: add a confidence level so we can have observers with different levels of confidence
func (e *Events) Observe(obs TipSetObserver) error {
e.lk.Lock()
defer e.lk.Unlock()
e.observers = append(e.observers, obs)
return nil
}
// observeChanges expects caller to hold e.lk
func (e *Events) observeChanges(ctx context.Context, rev, app []*types.TipSet) error {
for _, ts := range rev {
for _, o := range e.observers {
_ = o.Revert(ctx, ts)
}
}
for _, ts := range app {
for _, o := range e.observers {
_ = o.Apply(ctx, ts)
}
}
return nil
}
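A minimal sketch of a TipSetObserver implementation, using a hypothetical `heightLogger` type, to show how the Observe hook above is meant to be consumed:
// heightLogger just logs applied and reverted tipset heights.
type heightLogger struct{}

func (heightLogger) Apply(ctx context.Context, ts *types.TipSet) error {
	log.Infow("tipset applied", "height", ts.Height())
	return nil
}

func (heightLogger) Revert(ctx context.Context, ts *types.TipSet) error {
	log.Infow("tipset reverted", "height", ts.Height())
	return nil
}

// registration, assuming an *Events instance e:
// _ = e.Observe(heightLogger{})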


@ -5,6 +5,8 @@ import (
"math"
"sync"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@ -66,7 +68,7 @@ type queuedEvent struct {
// Manages chain head change events, which may be forward (new tipset added to
// chain) or backward (chain branch discarded in favour of heavier branch)
type hcEvents struct {
cs eventAPI
cs EventAPI
tsc *tipSetCache
ctx context.Context
gcConfidence uint64
@ -93,7 +95,7 @@ type hcEvents struct {
watcherEvents
}
func newHCEvents(ctx context.Context, cs eventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
func newHCEvents(ctx context.Context, cs EventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
e := hcEvents{
ctx: ctx,
cs: cs,
@ -353,14 +355,14 @@ type headChangeAPI interface {
// watcherEvents watches for a state change
type watcherEvents struct {
ctx context.Context
cs eventAPI
cs EventAPI
hcAPI headChangeAPI
lk sync.RWMutex
matchers map[triggerID]StateMatchFunc
}
func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents {
func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) watcherEvents {
return watcherEvents{
ctx: ctx,
cs: cs,
@ -455,14 +457,14 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler,
// messageEvents watches for message calls to actors
type messageEvents struct {
ctx context.Context
cs eventAPI
cs EventAPI
hcAPI headChangeAPI
lk sync.RWMutex
matchers map[triggerID]MsgMatchFunc
}
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents {
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
return messageEvents{
ctx: ctx,
cs: cs,
@ -583,12 +585,16 @@ func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHa
panic("expected msg")
}
rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key())
ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
if err != nil {
return false, err
}
return msgHnd(msg, rec, ts, height)
if ml == nil {
return msgHnd(msg, nil, ts, height)
}
return msgHnd(msg, &ml.Receipt, ts, height)
}
id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)


@ -54,7 +54,7 @@ func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*ty
return fcs.tipsets[key], nil
}
func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) {
func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
return nil, nil
}
@ -229,7 +229,7 @@ func (fcs *fakeCS) notifDone() {
fcs.sync.Unlock()
}
var _ eventAPI = &fakeCS{}
var _ EventAPI = &fakeCS{}
func TestAt(t *testing.T) {
fcs := &fakeCS{


@ -3,6 +3,8 @@ package events
import (
"context"
"github.com/filecoin-project/lotus/chain/stmgr"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
@ -22,12 +24,16 @@ func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd
return false, true, nil
}
rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key())
ml, err := me.cs.StateSearchMsg(me.ctx, ts.Key(), msg.Cid(), stmgr.LookbackNoLimit, true)
if err != nil {
return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err)
}
more, err = hnd(msg, rec, ts, ts.Height())
if ml == nil {
more, err = hnd(msg, nil, ts, ts.Height())
} else {
more, err = hnd(msg, &ml.Receipt, ts, ts.Height())
}
return true, more, err
}


@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"


@ -133,9 +133,22 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
}
}
err := setupMsig(remainder.Meta)
if err != nil {
return 0, nil, nil, xerrors.Errorf("setting up remainder msig: %w", err)
if remainder.Type == genesis.TAccount {
var ainfo genesis.AccountMeta
if err := json.Unmarshal(remainder.Meta, &ainfo); err != nil {
return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
// TODO: Use builtin.ReserveAddress...
value := cbg.CborInt(90)
if err := amap.Put(abi.AddrKey(ainfo.Owner), &value); err != nil {
return 0, nil, nil, err
}
} else if remainder.Type == genesis.TMultisig {
err := setupMsig(remainder.Meta)
if err != nil {
return 0, nil, nil, xerrors.Errorf("setting up remainder msig: %w", err)
}
}
amapaddr, err := amap.Root()


@ -330,24 +330,18 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil {
return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
st, err := cst.Put(ctx, &account0.State{Address: ainfo.Owner})
if err != nil {
return nil, nil, err
}
_, ok := keyIDs[ainfo.Owner]
if ok {
return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner)
}
err = state.SetActor(builtin.ReserveAddress, &types.Actor{
Code: builtin0.AccountActorCodeID,
Balance: template.RemainderAccount.Balance,
Head: st,
})
keyIDs[ainfo.Owner] = builtin.ReserveAddress
err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs)
if err != nil {
return nil, nil, xerrors.Errorf("setting remainder account: %w", err)
return nil, nil, xerrors.Errorf("creating remainder acct: %w", err)
}
case genesis.TMultisig:
if err = createMultisigAccount(ctx, bs, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs); err != nil {
return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err)


@ -15,7 +15,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) {
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
pts, err := sm.ChainStore().LoadTipSet(bt.Parents)
if err != nil {


@ -16,7 +16,7 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/fx"
"golang.org/x/xerrors"
)
@ -36,7 +36,7 @@ type FundManagerAPI struct {
type fundManagerAPI interface {
MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
}
// FundManager keeps track of funds in a set of addresses
@ -721,6 +721,6 @@ func (env *fundManagerEnvironment) WithdrawFunds(
}
func (env *fundManagerEnvironment) WaitMsg(ctx context.Context, c cid.Cid) error {
_, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence)
_, err := env.api.StateWaitMsg(ctx, c, build.MessageConfidence, api.LookbackNoLimit, true)
return err
}


@ -793,7 +793,7 @@ func (mapi *mockFundManagerAPI) publish(addr address.Address, amt abi.TokenAmoun
mapi.escrow[addr] = escrow
}
func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64) (*api.MsgLookup, error) {
func (mapi *mockFundManagerAPI) StateWaitMsg(ctx context.Context, c cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
res := &api.MsgLookup{
Message: c,
Receipt: types.MessageReceipt{


@ -195,16 +195,6 @@ func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSepc *api.M
maxFee = mf
}
if maxFee.Equals(big.Zero()) {
mf, err := mff()
if err != nil {
log.Errorf("failed to get default max gas fee: %+v", err)
mf = big.Zero()
}
maxFee = mf
}
gl := types.NewInt(uint64(msg.GasLimit))
totalFee := types.BigMul(msg.GasFeeCap, gl)
@ -805,7 +795,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
return nil
}
func (mp *MessagePool) GetNonce(addr address.Address) (uint64, error) {
func (mp *MessagePool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()


@ -199,7 +199,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
t.Helper()
n, err := mp.GetNonce(addr)
n, err := mp.GetNonce(context.Background(), addr, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}


@ -16,7 +16,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -1333,7 +1333,7 @@ readLoop:
}
actorMap := make(map[address.Address]address.Address)
actorWallets := make(map[address.Address]api.WalletAPI)
actorWallets := make(map[address.Address]api.Wallet)
for _, m := range msgs {
baseNonce := baseNonces[m.Message.From]


@ -23,19 +23,19 @@ const dsKeyActorNonce = "ActorNextNonce"
var log = logging.Logger("messagesigner")
type MpoolNonceAPI interface {
GetNonce(address.Address) (uint64, error)
GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
}
// MessageSigner keeps track of nonces per address, and increments the nonce
// when signing a message
type MessageSigner struct {
wallet api.WalletAPI
wallet api.Wallet
lk sync.Mutex
mpool MpoolNonceAPI
ds datastore.Batching
}
func NewMessageSigner(wallet api.WalletAPI, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
return &MessageSigner{
wallet: wallet,
@ -51,7 +51,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
defer ms.lk.Unlock()
// Get the next message nonce
nonce, err := ms.nextNonce(msg.From)
nonce, err := ms.nextNonce(ctx, msg.From)
if err != nil {
return nil, xerrors.Errorf("failed to create nonce: %w", err)
}
@ -92,12 +92,12 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
// nextNonce gets the next nonce for the given address.
// If there is no nonce in the datastore, gets the nonce from the message pool.
func (ms *MessageSigner) nextNonce(addr address.Address) (uint64, error) {
func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) {
// Nonces used to be created by the mempool and we need to support nodes
// that have mempool nonces, so first check the mempool for a nonce for
// this address. Note that the mempool returns the actor state's nonce
// by default.
nonce, err := ms.mpool.GetNonce(addr)
nonce, err := ms.mpool.GetNonce(ctx, addr, types.EmptyTSK)
if err != nil {
return 0, xerrors.Errorf("failed to get nonce from mempool: %w", err)
}
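A small sketch of how the reworked MpoolNonceAPI is consulted, assuming the imports and the package-level log from this file; logStartingNonce is a hypothetical helper. Note that the message pool implementation shown earlier ignores both the context and the tipset key, so types.EmptyTSK is passed here.

// logStartingNonce is a hypothetical helper showing the new GetNonce signature.
func logStartingNonce(ctx context.Context, mpool MpoolNonceAPI, addr address.Address) (uint64, error) {
	// The pool resolves the nonce from actor state; the tipset key is currently unused.
	nonce, err := mpool.GetNonce(ctx, addr, types.EmptyTSK)
	if err != nil {
		return 0, xerrors.Errorf("getting nonce for %s from mempool: %w", addr, err)
	}
	log.Debugf("starting nonce for %s is %d", addr, nonce)
	return nonce, nil
}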


@ -35,7 +35,7 @@ func (mp *mockMpool) setNonce(addr address.Address, nonce uint64) {
mp.nonces[addr] = nonce
}
func (mp *mockMpool) GetNonce(addr address.Address) (uint64, error) {
func (mp *mockMpool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.lk.RLock()
defer mp.lk.RUnlock()


@ -255,7 +255,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C
}
return nil
})
if err != nil && err != errHaltExecution {
if err != nil && !xerrors.Is(err, errHaltExecution) {
return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
}


@ -9,7 +9,7 @@ import (
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"


@ -1,4 +1,4 @@
package modules
package rpcstmgr
import (
"context"
@ -16,11 +16,11 @@ import (
)
type RPCStateManager struct {
gapi api.GatewayAPI
gapi api.Gateway
cstore *cbor.BasicIpldStore
}
func NewRPCStateManager(api api.GatewayAPI) *RPCStateManager {
func NewRPCStateManager(api api.Gateway) *RPCStateManager {
cstore := cbor.NewCborStore(blockstore.NewAPIBlockstore(api))
return &RPCStateManager{gapi: api, cstore: cstore}
}


@ -5,11 +5,13 @@ import (
"errors"
"fmt"
"sync"
"sync/atomic"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@ -43,9 +45,10 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/metrics"
)
const LookbackNoLimit = abi.ChainEpoch(-1)
const LookbackNoLimit = api.LookbackNoLimit
const ReceiptAmtBitwidth = 3
var log = logging.Logger("statemgr")
@ -280,6 +283,13 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
defer done()
partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
defer func() {
partDone()
}()
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
vmopt := &vm.VMOpts{
@ -303,7 +313,6 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
runCron := func(epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
From: builtin.SystemActorAddr,
@ -362,6 +371,9 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
pstate = newState
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
var receipts []cbg.CBORMarshaler
processedMsgs := make(map[cid.Cid]struct{})
for _, b := range bms {
@ -426,10 +438,16 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
}
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
if err := runCron(epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx))
for i, receipt := range receipts {
if err := rectarr.Set(uint64(i), receipt); err != nil {
@ -446,6 +464,9 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
return st, rectroot, nil
}
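The timer calls threaded through ApplyBlocks above follow one pattern: a single timer spans the whole call, while partDone is stopped and re-armed between phases so VMApplyEarly, VMApplyMessages, VMApplyCron and VMApplyFlush partition the total. A self-contained sketch of that pattern, assuming the metrics package as imported above; timedApply and its phase arguments are hypothetical.

// timedApply is a hypothetical function demonstrating the phased-timer pattern.
func timedApply(ctx context.Context, early, messages func() error) error {
	done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal) // total duration
	defer done()

	partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
	defer func() {
		partDone() // stop whichever phase timer is active on return
	}()

	if err := early(); err != nil {
		return err
	}

	partDone() // close the "early" phase and start timing message application
	partDone = metrics.Timer(ctx, metrics.VMApplyMessages)

	return messages()
}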
@ -564,24 +585,10 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *
return state.LookupID(addr)
}
func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) {
m, err := sm.cs.GetCMessage(msg)
if err != nil {
return nil, fmt.Errorf("failed to load message: %w", err)
}
_, r, _, err := sm.searchBackForMsg(ctx, ts, m, LookbackNoLimit)
if err != nil {
return nil, fmt.Errorf("failed to look back through chain for message: %w", err)
}
return r, nil
}
// WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already
// happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on
// chain for at least confidence epochs without being reverted before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -605,7 +612,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type)
}
r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage())
r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@ -619,7 +626,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
var backFm cid.Cid
backSearchWait := make(chan struct{})
go func() {
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit)
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message: %v", err)
return
@ -658,7 +665,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) {
return candidateTs, candidateRcp, candidateFm, nil
}
r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage())
r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@ -694,15 +701,13 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
}
}
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, lookbackLimit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
msg, err := sm.cs.GetCMessage(mcid)
if err != nil {
return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
}
head := sm.cs.GetHeaviestTipSet()
r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage())
r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, err
}
@ -711,7 +716,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, look
return head, r, foundMsg, nil
}
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit)
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message %s", mcid)
@ -731,7 +736,7 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid, look
// - 0 then no tipsets are searched
// - 5 then five tipset are searched
// - LookbackNoLimit then there is no limit
func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg, limit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
limitHeight := from.Height() - limit
noLimit := limit == LookbackNoLimit
@ -781,7 +786,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
// check that between cur and parent tipset the nonce fell into range of our message
if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) {
r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage())
r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err)
}
@ -796,7 +801,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
}
}
func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, cid.Cid, error) {
func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) {
// The genesis block did not execute any messages
if ts.Height() == 0 {
return nil, cid.Undef, nil
@ -819,7 +824,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm
if m.VMMessage().From == vmm.From { // cheaper to just check origin first
if m.VMMessage().Nonce == vmm.Nonce {
if m.VMMessage().EqualCall(vmm) {
if allowReplaced && m.VMMessage().EqualCall(vmm) {
if m.Cid() != msg {
log.Warnw("found message with equal nonce and call params but different CID",
"wanted", msg, "found", m.Cid(), "nonce", vmm.Nonce, "from", vmm.From)
@ -1295,11 +1300,12 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
}
return api.CirculatingSupply{
FilVested: filVested,
FilMined: filMined,
FilBurnt: filBurnt,
FilLocked: filLocked,
FilCirculating: ret,
FilVested: filVested,
FilMined: filMined,
FilBurnt: filBurnt,
FilLocked: filLocked,
FilCirculating: ret,
FilReserveDisbursed: filReserveDisbursed,
}, nil
}
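As a rough sketch of the updated search/wait surface (standing in for the removed GetReceipt), assuming a *StateManager as above; findReceipt is a hypothetical helper. Callers now pass the head tipset explicitly, a lookback limit, and an allowReplaced flag controlling whether a message with the same nonce and call that replaced the original counts as a match.

// findReceipt is a hypothetical helper, roughly what the removed GetReceipt did.
func findReceipt(ctx context.Context, sm *StateManager, mcid cid.Cid) (*types.MessageReceipt, error) {
	head := sm.cs.GetHeaviestTipSet()
	_, r, _, err := sm.SearchForMessage(ctx, head, mcid, LookbackNoLimit, true)
	if err != nil {
		return nil, xerrors.Errorf("searching chain for message %s: %w", mcid, err)
	}
	if r == nil {
		return nil, xerrors.Errorf("message %s not found on chain", mcid)
	}
	return r, nil
}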


@ -113,7 +113,7 @@ type ChainStore struct {
chainLocalBlockstore bstore.Blockstore
heaviestLk sync.Mutex
heaviestLk sync.RWMutex
heaviest *types.TipSet
bestTips *pubsub.PubSub
@ -775,10 +775,11 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS
}
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock()
return cs.heaviest
func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) {
cs.heaviestLk.RLock()
ts = cs.heaviest
cs.heaviestLk.RUnlock()
return
}
func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error {
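The switch to sync.RWMutex above lets concurrent readers of the head proceed without serializing behind one another, while writers still take the exclusive lock. A self-contained sketch of the same pattern, using hypothetical names rather than the real ChainStore API:

// headCache is a hypothetical type illustrating the read/write split.
type headCache struct {
	lk   sync.RWMutex
	head *types.TipSet
}

func (h *headCache) Get() *types.TipSet {
	h.lk.RLock() // readers share the lock
	defer h.lk.RUnlock()
	return h.head
}

func (h *headCache) Set(ts *types.TipSet) {
	h.lk.Lock() // writers exclude readers and other writers
	h.head = ts
	h.lk.Unlock()
}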


@ -751,6 +751,10 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
}
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}


@ -622,17 +622,17 @@ func TestDuplicateNonce(t *testing.T) {
var includedMsg cid.Cid
var skippedMsg cid.Cid
r0, err0 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[0][0].Cid(), ts2.TipSet().Key())
r1, err1 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[1][0].Cid(), ts2.TipSet().Key())
r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)
if err0 == nil {
require.Error(t, err1, "at least one of the StateGetReceipt calls should fail")
require.True(t, r0.ExitCode.IsSuccess())
require.True(t, r0.Receipt.ExitCode.IsSuccess())
includedMsg = msgs[0][0].Message.Cid()
skippedMsg = msgs[1][0].Message.Cid()
} else {
require.NoError(t, err1, "both the StateGetReceipt calls should not fail")
require.True(t, r1.ExitCode.IsSuccess())
require.True(t, r1.Receipt.ExitCode.IsSuccess())
includedMsg = msgs[1][0].Message.Cid()
skippedMsg = msgs[0][0].Message.Cid()
}
@ -751,6 +751,8 @@ func TestSyncInputs(t *testing.T) {
}
func TestSyncCheckpointHead(t *testing.T) {
t.Skip("flaky")
H := 10
tu := prepSyncTest(t, H)
@ -793,6 +795,8 @@ func TestSyncCheckpointHead(t *testing.T) {
}
func TestSyncCheckpointEarlierThanHead(t *testing.T) {
t.Skip("flaky")
H := 10
tu := prepSyncTest(t, H)


@ -9,7 +9,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log"
logging "github.com/ipfs/go-log/v2"
ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
"golang.org/x/xerrors"
@ -36,7 +36,7 @@ type LedgerKeyInfo struct {
Path []uint32
}
var _ api.WalletAPI = (*LedgerWallet)(nil)
var _ api.Wallet = (*LedgerWallet)(nil)
func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) {
ki, err := lw.getKeyInfo(signer)
@ -227,7 +227,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address.
return lw.importKey(lki)
}
func (lw *LedgerWallet) Get() api.WalletAPI {
func (lw *LedgerWallet) Get() api.Wallet {
if lw == nil {
return nil
}


@ -24,13 +24,13 @@ type MultiWallet struct {
}
type getif interface {
api.WalletAPI
api.Wallet
// workaround for the fact that iface(*struct(nil)) != nil
Get() api.WalletAPI
Get() api.Wallet
}
func firstNonNil(wallets ...getif) api.WalletAPI {
func firstNonNil(wallets ...getif) api.Wallet {
for _, w := range wallets {
if w.Get() != nil {
return w
@ -40,8 +40,8 @@ func firstNonNil(wallets ...getif) api.WalletAPI {
return nil
}
func nonNil(wallets ...getif) []api.WalletAPI {
var out []api.WalletAPI
func nonNil(wallets ...getif) []api.Wallet {
var out []api.Wallet
for _, w := range wallets {
if w.Get() == nil {
continue
@ -53,7 +53,7 @@ func nonNil(wallets ...getif) []api.WalletAPI {
return out
}
func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.WalletAPI, error) {
func (m MultiWallet) find(ctx context.Context, address address.Address, wallets ...getif) (api.Wallet, error) {
ws := nonNil(wallets...)
for _, w := range ws {
@ -167,4 +167,4 @@ func (m MultiWallet) WalletDelete(ctx context.Context, address address.Address)
}
}
var _ api.WalletAPI = MultiWallet{}
var _ api.Wallet = MultiWallet{}
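The Get() indirection above works around Go's typed-nil gotcha noted in the comment: a nil *struct stored in an interface value is not equal to nil. A short illustration, assuming fmt is imported and the LedgerWallet and api.Wallet types from the files above; nilGotcha is hypothetical.

// nilGotcha is a hypothetical function demonstrating why Get() is needed.
func nilGotcha() {
	var lw *LedgerWallet         // typed nil pointer
	var w api.Wallet = lw        // interface now holds (*LedgerWallet, nil)
	fmt.Println(w == nil)        // false: the interface carries a type descriptor
	fmt.Println(lw.Get() == nil) // true: Get() on a nil receiver returns an untyped nil
}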


@ -13,19 +13,19 @@ import (
)
type RemoteWallet struct {
api.WalletAPI
api.Wallet
}
func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*RemoteWallet, error) {
ai := cliutil.ParseApiInfo(info)
url, err := ai.DialArgs()
url, err := ai.DialArgs("v0")
if err != nil {
return nil, err
}
wapi, closer, err := client.NewWalletRPC(mctx, url, ai.AuthHeader())
wapi, closer, err := client.NewWalletRPCV0(mctx, url, ai.AuthHeader())
if err != nil {
return nil, xerrors.Errorf("creating jsonrpc client: %w", err)
}
@ -41,7 +41,7 @@ func SetupRemoteWallet(info string) func(mctx helpers.MetricsCtx, lc fx.Lifecycl
}
}
func (w *RemoteWallet) Get() api.WalletAPI {
func (w *RemoteWallet) Get() api.Wallet {
if w == nil {
return nil
}

Some files were not shown because too many files have changed in this diff Show More