Merge pull request #1 from filecoin-project/master

update-0706
Leo Cheung 2020-07-06 02:43:20 -05:00 committed by GitHub
commit 79e93760b7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
251 changed files with 8893 additions and 3389 deletions


@@ -1,6 +1,6 @@
 version: 2.1
 orbs:
-  go: gotest/tools@0.0.9
+  go: gotest/tools@0.0.13
 executors:
   golang:
@@ -45,7 +45,7 @@ commands:
             - 'v25-2k-lotus-params'
           paths:
             - /var/tmp/filecoin-proof-parameters/
-      - run: ./lotus fetch-params --proving-params 2048
+      - run: ./lotus fetch-params 2048
       - save_cache:
           name: Save parameters cache
           key: 'v25-2k-lotus-params'
@@ -79,7 +79,6 @@ jobs:
     steps:
       - install-deps
      - prepare
-      - go/mod-download
       - go/mod-tidy-check

   build-all:
@@ -87,12 +86,8 @@ jobs:
     steps:
       - install-deps
       - prepare
-      - go/mod-download
       - run: sudo apt-get update
       - run: sudo apt-get install npm
-      - restore_cache:
-          name: restore go mod cache
-          key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
       - run:
           command: make buildall
       - store_artifacts:
@@ -112,10 +107,6 @@ jobs:
     steps:
       - install-deps
       - prepare
-      - go/mod-download
-      - restore_cache:
-          name: restore go mod cache
-          key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
       - run:
           command: make debug
@@ -134,6 +125,9 @@ jobs:
         type: string
         default: "./..."
         description: Import paths of packages to be tested.
+      winpost-test:
+        type: string
+        default: "0"
       test-suite-name:
         type: string
         default: unit
@@ -156,10 +150,6 @@ jobs:
     steps:
       - install-deps
       - prepare
-      - go/mod-download
-      - restore_cache:
-          name: restore go mod cache
-          key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
       - run:
           command: make deps lotus
           no_output_timeout: 30m
@@ -171,6 +161,7 @@ jobs:
           environment:
             GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml
             GOTESTSUM_FORMAT: << parameters.gotestsum-format >>
+            LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
           command: |
             mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
             gotestsum -- \
@@ -189,16 +180,11 @@ jobs:
           shell: /bin/bash -eo pipefail
           command: |
             bash <(curl -s https://codecov.io/bash)
-      - save_cache:
-          name: save go mod cache
-          key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
-          paths:
-            - "~/go/pkg"
-            - "~/go/src/github.com"
-            - "~/go/src/golang.org"

   test-short:
     <<: *test
+  test-window-post:
+    <<: *test

   build-macos:
     description: build darwin lotus binary
@@ -228,10 +214,9 @@ jobs:
           curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
           chmod +x /usr/local/bin/jq
       - restore_cache:
-          name: restore go mod and cargo cache
+          name: restore cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
       - install-deps
-      - go/mod-download
       - run:
           command: make build
           no_output_timeout: 30m
@@ -258,7 +243,6 @@ jobs:
     steps:
       - install-deps
       - prepare
-      - go/mod-download
       - run:
           command: "! go fmt ./... 2>&1 | read"
@@ -271,7 +255,7 @@ jobs:
         default: golang
       golangci-lint-version:
         type: string
-        default: 1.23.8
+        default: 1.27.0
       concurrency:
         type: string
         default: '2'
@@ -287,7 +271,6 @@ jobs:
     steps:
       - install-deps
       - prepare
-      - go/mod-download
       - run:
           command: make deps
           no_output_timeout: 30m
@@ -297,7 +280,7 @@ jobs:
       - run:
           name: Lint
           command: |
-            $HOME/.local/bin/golangci-lint run -v \
+            $HOME/.local/bin/golangci-lint run -v --timeout 2m \
               --concurrency << parameters.concurrency >> << parameters.args >>
   lint-changes:
     <<: *lint
@@ -332,10 +315,13 @@ workflows:
     jobs:
       - lint-changes:
          args: "--new-from-rev origin/master"
-      - test:
-          codecov-upload: true
       - mod-tidy-check
       - gofmt
+      - test:
+          codecov-upload: true
+      - test-window-post:
+          go-test-flags: "-run=TestWindowedPost"
+          winpost-test: "1"
       - test-short:
          go-test-flags: "--timeout 10m --short"
          filters:
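The new test-window-post job runs only TestWindowedPost and exports LOTUS_TEST_WINDOW_POST=1 through the winpost-test parameter. As a rough sketch (not taken from this diff; the package name and placement are illustrative), a Go test can gate itself on that variable like this:

package node_test

import (
    "os"
    "testing"
)

func TestWindowedPost(t *testing.T) {
    // The test-window-post CI job exports LOTUS_TEST_WINDOW_POST=1; skip
    // otherwise so the ordinary unit-test job stays fast.
    if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
        t.Skip("set LOTUS_TEST_WINDOW_POST=1 to run the window PoSt scenario")
    }
    // ... the actual window PoSt scenario would run here ...
}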

.gitignore

@@ -35,3 +35,4 @@ build/paramfetch.sh
 bin/ipget
 bin/tmp/*
 .idea
+scratchpad


@@ -22,13 +22,27 @@ issues:
     - "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this"
     - "Potential file inclusion via variable"
     - "should have( a package)? comment"
+    - "Error return value of `logging.SetLogLevel` is not checked"
   exclude-use-default: false
   exclude-rules:
+    - path: lotuspond
+      linters:
+        - errcheck
     - path: node/modules/lp2p
       linters:
         - golint
-    - path: ".*_test.go"
+    - path: build/params_.*\.go
+      linters:
+        - golint
+    - path: api/apistruct/struct.go
+      linters:
+        - golint
+    - path: .*_test.go
       linters:
         - gosec


@@ -6,7 +6,7 @@ all: build
 unexport GOFLAGS

 GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 13), 1)
+ifeq ($(shell expr $(GOVERSION) \< 14), 1)
 $(warning Your Golang version is go 1.$(GOVERSION))
 $(error Update Golang to version $(shell grep '^go' go.mod))
 endif
@@ -17,7 +17,7 @@ MODULES:=
 CLEAN:=
 BINS:=

-ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit='+git$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))'
+ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))
 ifneq ($(strip $(LDFLAGS)),)
 ldflags+=-extldflags=$(LDFLAGS)
 endif
@@ -105,15 +105,17 @@ install:
 install-services: install
     mkdir -p /usr/local/lib/systemd/system
+    mkdir -p /var/log/lotus
     install -C -m 0644 ./scripts/lotus-daemon.service /usr/local/lib/systemd/system/lotus-daemon.service
     install -C -m 0644 ./scripts/lotus-miner.service /usr/local/lib/systemd/system/lotus-miner.service
     systemctl daemon-reload
     @echo
-    @echo "lotus and lotus-miner services installed. Don't forget to 'systemctl enable lotus|lotus-miner' for it to be enabled on startup."
+    @echo "lotus-daemon and lotus-miner services installed. Don't forget to 'systemctl enable lotus-daemon|lotus-miner' for it to be enabled on startup."

 clean-services:
     rm -f /usr/local/lib/systemd/system/lotus-daemon.service
     rm -f /usr/local/lib/systemd/system/lotus-miner.service
+    rm -f /usr/local/lib/systemd/system/chainwatch.service
     systemctl daemon-reload

 # TOOLS
@@ -132,7 +134,7 @@ benchmarks:
     @curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}"
 .PHONY: benchmarks

-pond: build
+pond: 2k
     go build -o pond ./lotuspond
     (cd lotuspond/front && npm i && CI=false npm run build)
 .PHONY: pond
@@ -160,6 +162,13 @@ chainwatch:
 .PHONY: chainwatch
 BINS+=chainwatch

+install-chainwatch-service: chainwatch
+    install -C ./chainwatch /usr/local/bin/chainwatch
+    install -C -m 0644 ./scripts/chainwatch.service /usr/local/lib/systemd/system/chainwatch.service
+    systemctl daemon-reload
+    @echo
+    @echo "chainwatch installed. Don't forget to 'systemctl enable chainwatch' for it to be enabled on startup."
+
 bench:
     rm -f bench
     go build -o bench ./cmd/lotus-bench
@@ -182,6 +191,12 @@ health:
 .PHONY: health
 BINS+=health

+testground:
+    go build -tags testground -o /dev/null ./cmd/lotus
+.PHONY: testground
+BINS+=testground
+
 # MISC

 buildall: $(BINS)
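The ldflags change above drops the quoting around the commit suffix that -X injects into build.CurrentCommit at link time. A minimal sketch of the consuming side, assuming only what the -X flag itself names (the BuildVersion value and UserVersion helper below are illustrative, not taken from the diff):

package build

// CurrentCommit is overwritten by the linker via
//   -ldflags "-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.<hash>"
// so it must remain a plain, uninitialised package-level string.
var CurrentCommit string

// BuildVersion is only a placeholder value for this sketch.
const BuildVersion = "0.0.0"

// UserVersion shows how the injected suffix would typically be surfaced.
func UserVersion() string {
    return BuildVersion + CurrentCommit
}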


@@ -4,14 +4,23 @@

 Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://github.com/filecoin-project/specs).

-## Development
-
-All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).
-
 ## Building & Documentation

 For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).

+## Reporting a Vulnerability
+
+Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
+
+## Development
+
+All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).
+
+The main branches under development at the moment are:
+* [`master`](https://github.com/filecoin-project/lotus): current testnet.
+* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
+* [`interopnet`](https://github.com/filecoin-project/lotus/tree/interopnet): devnet running one of `next` commits.
+
 ## License

 Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)

SECURITY.md (new file)

@@ -0,0 +1,29 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+For *critical* bugs, please send an email to security@filecoin.org.
+
+The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about it. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
+
+Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
+
+Here are some examples of bugs we would consider 'critical':
+
+* If you can spend from a `multisig` wallet you do not control the keys for.
+* If you can cause a miner to be slashed without them actually misbehaving.
+* If you can maintain power without submitting windowed posts regularly.
+* If you can craft a message that causes lotus nodes to panic.
+* If you can cause your miner to win significantly more blocks than it should.
+* If you can craft a message that causes a persistent fork in the network.
+* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
+
+This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
+
+## Supported Versions
+
+* TODO: This should be defined and set up by Mainnet launch.
+
+| Version | Supported          |
+| ------- | ------------------ |
+| Testnet | :white_check_mark: |


@@ -13,11 +13,13 @@ import (
 )

 type Common interface {
-	// Auth
+	// MethodGroup: Auth

 	AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
 	AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)

-	// network
+	// MethodGroup: Net

 	NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
 	NetPeers(context.Context) ([]peer.AddrInfo, error)
@@ -25,6 +27,9 @@ type Common interface {
 	NetAddrsListen(context.Context) (peer.AddrInfo, error)
 	NetDisconnect(context.Context, peer.ID) error
 	NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
+	NetPubsubScores(context.Context) ([]PubsubScore, error)
+
+	// MethodGroup: Common

 	// ID returns peerID of libp2p node backing this API
 	ID(context.Context) (peer.ID, error)
@@ -37,6 +42,8 @@ type Common interface {

 	// trigger graceful shutdown
 	Shutdown(context.Context) error
+
+	Closing(context.Context) (<-chan struct{}, error)
 }

 // Version provides various build-time information
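The new Closing method lets API clients learn that the node they are attached to is shutting down. A rough client-side usage sketch (not part of the diff; the package and function are made up for illustration):

package client

import (
    "context"
    "log"

    "github.com/filecoin-project/lotus/api"
)

// WatchShutdown blocks until the remote node starts shutting down (the channel
// returned by Closing is closed) or until the context is cancelled.
func WatchShutdown(ctx context.Context, node api.Common) {
    closing, err := node.Closing(ctx)
    if err != nil {
        log.Printf("Closing: %s", err)
        return
    }
    select {
    case <-closing:
        log.Println("lotus node is shutting down")
    case <-ctx.Done():
    }
}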


@@ -30,56 +30,124 @@ type FullNode interface {

 	// MethodGroup: Chain
 	// The Chain method group contains methods for interacting with the
-	// blockchain, but that do not require any form of state computation
+	// blockchain, but that do not require any form of state computation.

-	// ChainNotify returns channel with chain head updates
-	// First message is guaranteed to be of len == 1, and type == 'current'
+	// ChainNotify returns channel with chain head updates.
+	// First message is guaranteed to be of len == 1, and type == 'current'.
 	ChainNotify(context.Context) (<-chan []*HeadChange, error)
-	// ChainHead returns the current head of the chain
+
+	// ChainHead returns the current head of the chain.
 	ChainHead(context.Context) (*types.TipSet, error)
-	// ChainGetRandomness is used to sample the chain for randomness
+
+	// ChainGetRandomness is used to sample the chain for randomness.
 	ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
-	// ChainGetBlock returns the block specified by the given CID
+
+	// ChainGetBlock returns the block specified by the given CID.
 	ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
+
+	// ChainGetTipSet returns the tipset specified by the given TipSetKey.
 	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
-	ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
-	ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
-	ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
+
+	// ChainGetBlockMessages returns messages stored in the specified block.
+	ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
+
+	// ChainGetParentReceipts returns receipts for messages in parent tipset of
+	// the specified block.
+	ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
+
+	// ChainGetParentMessages returns messages stored in parent tipset of the
+	// specified block.
+	ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
+
+	// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
+	// If there are no blocks at the specified epoch, a tipset at higher epoch
+	// will be returned.
 	ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
+
+	// ChainReadObj reads ipld nodes referenced by the specified CID from chain
+	// blockstore and returns raw bytes.
 	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+
+	// ChainHasObj checks if a given CID exists in the chain blockstore.
 	ChainHasObj(context.Context, cid.Cid) (bool, error)
+
 	ChainStatObj(context.Context, cid.Cid, cid.Cid) (ObjStat, error)
+
+	// ChainSetHead forcefully sets current chain head. Use with caution.
 	ChainSetHead(context.Context, types.TipSetKey) error
+
+	// ChainGetGenesis returns the genesis tipset.
 	ChainGetGenesis(context.Context) (*types.TipSet, error)
+
+	// ChainTipSetWeight computes weight for the specified tipset.
 	ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
+
 	ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
+
+	// ChainGetMessage reads a message referenced by the specified CID from the
+	// chain blockstore.
 	ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
+
+	// ChainGetPath returns a set of revert/apply operations needed to get from
+	// one tipset to another, for example:
+	//```
+	//        to
+	//         ^
+	// from   tAA
+	//   ^     ^
+	// tBA    tAB
+	//  ^---*--^
+	//      ^
+	//     tRR
+	//```
+	// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
 	ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
+
+	// ChainExport returns a stream of bytes with CAR dump of chain data.
 	ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error)

 	// MethodGroup: Sync
 	// The Sync method group contains methods for interacting with and
-	// observing the lotus sync service
+	// observing the lotus sync service.

-	// SyncState returns the current status of the lotus sync system
+	// SyncState returns the current status of the lotus sync system.
 	SyncState(context.Context) (*SyncState, error)
-	// SyncSubmitBlock can be used to submit a newly created block to the
+
+	// SyncSubmitBlock can be used to submit a newly created block to the.
 	// network through this node
 	SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
+
+	// SyncIncomingBlocks returns a channel streaming incoming, potentially not
+	// yet synced block headers.
 	SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
+
+	// SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced.
+	// Use with extreme caution.
 	SyncMarkBad(ctx context.Context, bcid cid.Cid) error
+
+	// SyncCheckBad checks if a block was marked as bad, and if it was, returns
+	// the reason.
 	SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)

 	// MethodGroup: Mpool
 	// The Mpool methods are for interacting with the message pool. The message pool
 	// manages all incoming and outgoing 'messages' going over the network.

+	// MpoolPending returns pending mempool messages.
 	MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
+
+	// MpoolPush pushes a signed message to mempool.
 	MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
-	MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // get nonce, sign, push
+
+	// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
+	// to mempool.
+	MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error)
+
+	// MpoolGetNonce gets next nonce for the specified sender.
+	// Note that this method may not be atomic. Use MpoolPushMessage instead.
 	MpoolGetNonce(context.Context, address.Address) (uint64, error)
 	MpoolSub(context.Context) (<-chan MpoolUpdate, error)
-	MpoolEstimateGasPrice(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
+
+	// MpoolEstimateGasPrice estimates what gas price should be used for a
+	// message to have high likelihood of inclusion in `nblocksincl` epochs.
+	MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)

 	// MethodGroup: Miner
@@ -90,17 +158,30 @@ type FullNode interface {

 	// MethodGroup: Wallet

+	// WalletNew creates a new address in the wallet with the given sigType.
 	WalletNew(context.Context, crypto.SigType) (address.Address, error)
+
+	// WalletHas indicates whether the given address is in the wallet.
 	WalletHas(context.Context, address.Address) (bool, error)
+
+	// WalletList lists all the addresses in the wallet.
 	WalletList(context.Context) ([]address.Address, error)
+
+	// WalletBalance returns the balance of the given address at the current head of the chain.
 	WalletBalance(context.Context, address.Address) (types.BigInt, error)
+
+	// WalletSign signs the given bytes using the given address.
 	WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error)
+
+	// WalletSignMessage signs the given message using the given address.
 	WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
+
+	// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
+	// The address does not have to be in the wallet.
 	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) bool
+
+	// WalletDefaultAddress returns the address marked as default in the wallet.
 	WalletDefaultAddress(context.Context) (address.Address, error)
+
+	// WalletSetDefault marks the given address as the default one.
 	WalletSetDefault(context.Context, address.Address) error
+
+	// WalletExport returns the private key of an address in the wallet.
 	WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
+
+	// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
 	WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
+
+	// WalletDelete deletes an address from the wallet.
 	WalletDelete(context.Context, address.Address) error

 	// Other
@@ -109,18 +190,27 @@ type FullNode interface {
 	// The Client methods all have to do with interacting with the storage and
 	// retrieval markets as a client

-	// ClientImport imports file under the specified path into filestore
+	// ClientImport imports file under the specified path into filestore.
 	ClientImport(ctx context.Context, ref FileRef) (cid.Cid, error)
-	// ClientStartDeal proposes a deal with a miner
+	// ClientStartDeal proposes a deal with a miner.
 	ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
-	// ClientGetDeal info returns the latest information about a given deal
+	// ClientGetDealInfo returns the latest information about a given deal.
 	ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
+	// ClientListDeals returns information about the deals made by the local client.
 	ClientListDeals(ctx context.Context) ([]DealInfo, error)
+	// ClientHasLocal indicates whether a certain CID is locally stored.
 	ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
+	// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
 	ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error)
+	// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+	ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (QueryOffer, error)
+	// ClientRetrieve initiates the retrieval of a file, as specified in the order.
 	ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
+	// ClientQueryAsk returns a signed StorageAsk from the specified miner.
 	ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
+	// ClientCalcCommP calculates the CommP for a specified file, based on the sector size of the provided miner.
 	ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*CommPRet, error)
+	// ClientGenCar generates a CAR file for the specified file.
 	ClientGenCar(ctx context.Context, ref FileRef, outpath string) error

 	// ClientUnimport removes references to the specified file from filestore
@@ -132,53 +222,107 @@ type FullNode interface {
 	//ClientListAsks() []Ask

 	// MethodGroup: State
-	// The State methods are used to query, inspect, and interact with chain state
+	// The State methods are used to query, inspect, and interact with chain state.
+	// All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
+	// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.

-	// if tipset is nil, we'll use heaviest
+	// StateCall runs the given message and returns its result without any persisted changes.
 	StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
+	// StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset.
 	StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
+	// StateGetActor returns the indicated actor's nonce and balance.
 	StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
-	StateReadState(ctx context.Context, act *types.Actor, tsk types.TipSetKey) (*ActorState, error)
+	// StateReadState returns the indicated actor's state.
+	StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
+	// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
 	StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error)
+	// StateNetworkName returns the name of the network the node is synced to
 	StateNetworkName(context.Context) (dtypes.NetworkName, error)
+	// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
+	// If the filterOut boolean is set to true, any sectors in the filter are excluded.
+	// If false, only those sectors in the filter are included.
 	StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error)
+	// StateMinerProvingSet returns info about those sectors that a given miner is actively proving.
 	StateMinerProvingSet(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error)
+	// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
+	// and returns the deadline-related calculations.
 	StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)
+	// StateMinerPower returns the power of the indicated miner
 	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
-	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
+	// StateMinerInfo returns info about the indicated miner
+	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (MinerInfo, error)
+	// StateMinerDeadlines returns all the proving deadlines for the given miner
 	StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error)
+	// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
 	StateMinerFaults(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error)
-	// Returns all non-expired Faults that occur within lookback epochs of the given tipset
+	// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
 	StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
+	// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
 	StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error)
+	// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
 	StateMinerInitialPledgeCollateral(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (types.BigInt, error)
+	// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
 	StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+	// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
 	StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
+	// StateSectorGetInfo returns the on-chain info for the specified miner's sector
+	StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
 	StatePledgeCollateral(context.Context, types.TipSetKey) (types.BigInt, error)
-	StateWaitMsg(context.Context, cid.Cid) (*MsgLookup, error)
+	// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
 	StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
+	// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
+	// message arrives on chain, and gets to the indicated confidence depth.
+	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
+	// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
 	StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
+	// StateListActors returns the addresses of every actor in the state
 	StateListActors(context.Context, types.TipSetKey) ([]address.Address, error)
+	// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
 	StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error)
+	// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
 	StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error)
+	// StateMarketDeals returns information about every deal in the Storage Market
 	StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error)
+	// StateMarketStorageDeal returns information about the indicated deal
 	StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error)
+	// StateLookupID retrieves the ID address of the given address
 	StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
+	// StateAccountKey returns the public key address of the given ID address
 	StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
+	// StateChangedActors returns all the actors whose states change between the two given state CIDs
+	// TODO: Should this take tipset keys instead?
 	StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error)
+	// StateGetReceipt returns the message receipt for the given message
 	StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
+	// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
 	StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
+	// StateCompute is a flexible command that applies the given messages on the given tipset.
+	// The messages are run as though the VM were at the provided height.
 	StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)

 	// MethodGroup: Msig
 	// The Msig methods are used to interact with multisig wallets on the
 	// filecoin network

+	// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
 	MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
+	// MsigCreate creates a multisig wallet
+	// It takes the following params: <required number of senders>, <approving addresses>, <initial balance>,
+	// <sender address of the create msg>, <gas price>
 	MsigCreate(context.Context, int64, []address.Address, types.BigInt, address.Address, types.BigInt) (cid.Cid, error)
+	// MsigPropose proposes a multisig message
+	// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
+	// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
 	MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+	// MsigApprove approves a previously-proposed multisig message
+	// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
+	// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
 	MsigApprove(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+	// MsigCancel cancels a previously-proposed multisig message
+	// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
+	// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
+	// TODO: You can't cancel someone else's proposed message, so "src" and "proposer" here are redundant
 	MsigCancel(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)

 	MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
@@ -235,6 +379,7 @@ type DealInfo struct {

 type MsgLookup struct {
 	Receipt types.MessageReceipt
+	// TODO: This should probably be a tipsetkey?
 	TipSet  *types.TipSet
 }

@@ -351,7 +496,7 @@ type RetrievalOrder struct {

 type InvocResult struct {
 	Msg                *types.Message
 	MsgRct             *types.MessageReceipt
-	InternalExecutions []*types.ExecutionResult
+	ExecutionTrace     types.ExecutionTrace
 	Error              string
 	Duration           time.Duration
 }
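Two of the FullNode changes above fit together: MpoolPushMessage now documents that it assigns the nonce, signs and pushes atomically, and StateWaitMsg gained a confidence parameter. A rough usage sketch (not part of the diff; the helper name and the confidence value of 5 are illustrative):

package client

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// sendAndWait pushes an unsigned message and waits until it lands on chain
// and is buried a few epochs deep.
func sendAndWait(ctx context.Context, node api.FullNode, msg *types.Message) error {
    sm, err := node.MpoolPushMessage(ctx, msg) // nonce assignment, signing and push in one call
    if err != nil {
        return err
    }
    lookup, err := node.StateWaitMsg(ctx, sm.Cid(), 5) // confidence depth of 5 epochs (example only)
    if err != nil {
        return err
    }
    fmt.Printf("message executed with exit code %d\n", lookup.Receipt.ExitCode)
    return nil
}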


@@ -8,11 +8,10 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"

-	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/sector-storage/stores"
 	"github.com/filecoin-project/sector-storage/storiface"
 	"github.com/filecoin-project/specs-actors/actors/abi"
+
+	"github.com/filecoin-project/lotus/chain/types"
 )

 // StorageMiner is a low-level interface to the Filecoin network storage miner node
@@ -37,6 +36,7 @@ type StorageMiner interface {
 	SectorsRefs(context.Context) (map[string][]SealedRef, error)

 	SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
+	SectorRemove(context.Context, abi.SectorNumber) error

 	StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
 	StorageLocal(ctx context.Context) (map[stores.ID]string, error)
@@ -51,10 +51,21 @@ type StorageMiner interface {
 	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
 	MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error)
 	MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
-	MarketSetPrice(context.Context, types.BigInt) error
+	MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
+	MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)

 	DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
 	DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
+	DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
+	DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
+	DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
+	DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
+	DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
+	DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
+	DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
+	DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
+	DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
+	DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error

 	StorageAddLocal(ctx context.Context, path string) error
 }
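MarketSetPrice is replaced by MarketSetAsk, which also states how long the ask is valid and which piece sizes the miner accepts. A rough caller sketch (not part of the diff; every concrete value below is illustrative):

package client

import (
    "context"

    "github.com/filecoin-project/specs-actors/actors/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// setAsk publishes a storage ask using the widened MarketSetAsk signature.
func setAsk(ctx context.Context, miner api.StorageMiner) error {
    price := types.NewInt(500000000)          // price in attoFIL, example only
    duration := abi.ChainEpoch(1000000)       // how long the ask stays valid
    minPiece := abi.PaddedPieceSize(256)      // smallest piece accepted
    maxPiece := abi.PaddedPieceSize(32 << 30) // largest piece accepted (32 GiB)
    return miner.MarketSetAsk(ctx, price, duration, minPiece, maxPiece)
}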


@@ -2,6 +2,9 @@ package api

 import (
 	"context"
+	"io"
+
+	"github.com/ipfs/go-cid"

 	"github.com/filecoin-project/sector-storage/sealtasks"
 	"github.com/filecoin-project/sector-storage/stores"
@@ -12,7 +15,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 )

-type WorkerApi interface {
+type WorkerAPI interface {
 	Version(context.Context) (build.Version, error)
 	// TODO: Info() (name, ...) ?
@@ -21,7 +24,13 @@ type WorkerApi interface {
 	Info(context.Context) (storiface.WorkerInfo, error)

 	storage.Sealer
-	Fetch(context.Context, abi.SectorID, stores.SectorFileType, bool) error
+
+	MoveStorage(ctx context.Context, sector abi.SectorID) error
+
+	UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
+	ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error
+
+	Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error

 	Closing(context.Context) (<-chan struct{}, error)
 }
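The renamed WorkerAPI gains an unseal/read data path. A rough sketch of driving the two new methods together (not part of the diff; the helper, sector coordinates and randomness are assumed for illustration):

package client

import (
    "context"
    "os"

    cid "github.com/ipfs/go-cid"

    "github.com/filecoin-project/sector-storage/storiface"
    "github.com/filecoin-project/specs-actors/actors/abi"

    "github.com/filecoin-project/lotus/api"
)

// unsealAndRead asks a worker to unseal one piece and then streams it to stdout.
// unsealedCID stands in for the sector's unsealed CID; offset and size are
// illustrative values supplied by the caller.
func unsealAndRead(ctx context.Context, w api.WorkerAPI, sector abi.SectorID,
    offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize,
    randomness abi.SealRandomness, unsealedCID cid.Cid) error {

    if err := w.UnsealPiece(ctx, sector, offset, size, randomness, unsealedCID); err != nil {
        return err
    }
    return w.ReadPiece(ctx, os.Stdout, sector, offset, size)
}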


@@ -31,7 +31,7 @@ func PermissionedFullAPI(a api.FullNode) api.FullNode {
 	return &out
 }

-func PermissionedWorkerAPI(a api.WorkerApi) api.WorkerApi {
+func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
 	var out WorkerStruct
 	auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
 	return &out


@@ -2,6 +2,7 @@ package apistruct

 import (
 	"context"
+	"io"

 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p-core/network"
@@ -41,6 +42,7 @@ type CommonStruct struct {
 		NetAddrsListen  func(context.Context) (peer.AddrInfo, error)          `perm:"read"`
 		NetDisconnect   func(context.Context, peer.ID) error                  `perm:"write"`
 		NetFindPeer     func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"`
+		NetPubsubScores func(context.Context) ([]api.PubsubScore, error)      `perm:"read"`

 		ID      func(context.Context) (peer.ID, error)     `perm:"read"`
 		Version func(context.Context) (api.Version, error) `perm:"read"`
@@ -49,6 +51,7 @@ type CommonStruct struct {
 		LogSetLevel func(context.Context, string, string) error `perm:"write"`

 		Shutdown func(context.Context) error                    `perm:"admin"`
+		Closing  func(context.Context) (<-chan struct{}, error) `perm:"read"`
 	}
 }
@@ -110,6 +113,7 @@ type FullNodeStruct struct {
 		ClientListImports     func(ctx context.Context) ([]api.Import, error)                                         `perm:"write"`
 		ClientHasLocal        func(ctx context.Context, root cid.Cid) (bool, error)                                   `perm:"write"`
 		ClientFindData        func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error)                       `perm:"read"`
+		ClientMinerQueryOffer func(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error)  `perm:"read"`
 		ClientStartDeal       func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error)                `perm:"admin"`
 		ClientGetDealInfo     func(context.Context, cid.Cid) (*api.DealInfo, error)                                   `perm:"read"`
 		ClientListDeals       func(ctx context.Context) ([]api.DealInfo, error)                                       `perm:"write"`
@@ -123,7 +127,7 @@ type FullNodeStruct struct {
 		StateMinerProvingSet      func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
 		StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error)    `perm:"read"`
 		StateMinerPower           func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)        `perm:"read"`
-		StateMinerInfo            func(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)        `perm:"read"`
+		StateMinerInfo            func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)          `perm:"read"`
 		StateMinerDeadlines       func(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error)       `perm:"read"`
 		StateMinerFaults          func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error)          `perm:"read"`
 		StateAllMinerFaults       func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error)            `perm:"read"`
@@ -131,12 +135,13 @@ type FullNodeStruct struct {
 		StateMinerInitialPledgeCollateral func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (types.BigInt, error)                      `perm:"read"`
 		StateMinerAvailableBalance        func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)                                        `perm:"read"`
 		StateSectorPreCommitInfo          func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)  `perm:"read"`
+		StateSectorGetInfo                func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)          `perm:"read"`
 		StateCall                         func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error)                                     `perm:"read"`
 		StateReplay                       func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)                                            `perm:"read"`
 		StateGetActor                     func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)                                        `perm:"read"`
-		StateReadState                    func(context.Context, *types.Actor, types.TipSetKey) (*api.ActorState, error)                                        `perm:"read"`
+		StateReadState                    func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error)                                     `perm:"read"`
 		StatePledgeCollateral             func(context.Context, types.TipSetKey) (types.BigInt, error)                                                         `perm:"read"`
-		StateWaitMsg                      func(context.Context, cid.Cid) (*api.MsgLookup, error)                                                               `perm:"read"`
+		StateWaitMsg                      func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error)                                    `perm:"read"`
 		StateSearchMsg                    func(context.Context, cid.Cid) (*api.MsgLookup, error)                                                               `perm:"read"`
 		StateListMiners                   func(context.Context, types.TipSetKey) ([]address.Address, error)                                                    `perm:"read"`
 		StateListActors                   func(context.Context, types.TipSetKey) ([]address.Address, error)                                                    `perm:"read"`
@@ -192,7 +197,8 @@ type StorageMinerStruct struct {
 		MarketImportDealData      func(context.Context, cid.Cid, string) error                                                                                              `perm:"write"`
 		MarketListDeals           func(ctx context.Context) ([]storagemarket.StorageDeal, error)                                                                            `perm:"read"`
 		MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error)                                                                              `perm:"read"`
-		MarketSetPrice            func(context.Context, types.BigInt) error                                                                                                 `perm:"admin"`
+		MarketSetAsk              func(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"`
+		MarketGetAsk              func(ctx context.Context) (*storagemarket.SignedStorageAsk, error)                                                                        `perm:"read"`

 		PledgeSector func(context.Context) error `perm:"write"`
@@ -200,6 +206,7 @@ type StorageMinerStruct struct {
 		SectorsList   func(context.Context) ([]abi.SectorNumber, error)              `perm:"read"`
 		SectorsRefs   func(context.Context) (map[string][]api.SealedRef, error)      `perm:"read"`
 		SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"`
+		SectorRemove  func(context.Context, abi.SectorNumber) error                  `perm:"admin"`

 		WorkerConnect func(context.Context, string) error                             `perm:"admin"` // TODO: worker perm
 		WorkerStats   func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"`
@@ -208,15 +215,27 @@ type StorageMinerStruct struct {
 		StorageLocal         func(context.Context) (map[stores.ID]string, error)                                                                                            `perm:"admin"`
 		StorageStat          func(context.Context, stores.ID) (stores.FsStat, error)                                                                                        `perm:"admin"`
 		StorageAttach        func(context.Context, stores.StorageInfo, stores.FsStat) error                                                                                 `perm:"admin"`
-		StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error                                                                    `perm:"admin"`
+		StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error                                                              `perm:"admin"`
 		StorageDropSector    func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error                                                                    `perm:"admin"`
-		StorageFindSector    func(context.Context, abi.SectorID, stores.SectorFileType, bool) ([]stores.StorageInfo, error)                                                 `perm:"admin"`
+		StorageFindSector    func(context.Context, abi.SectorID, stores.SectorFileType, bool) ([]stores.SectorStorageInfo, error)                                           `perm:"admin"`
 		StorageInfo          func(context.Context, stores.ID) (stores.StorageInfo, error)                                                                                   `perm:"admin"`
-		StorageBestAlloc     func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredProof, sealing bool) ([]stores.StorageInfo, error)                 `perm:"admin"`
+		StorageBestAlloc     func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, sealing stores.PathType) ([]stores.StorageInfo, error)  `perm:"admin"`
 		StorageReportHealth  func(ctx context.Context, id stores.ID, report stores.HealthReport) error                                                                      `perm:"admin"`
+		StorageLock          func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error                                  `perm:"admin"`
+		StorageTryLock       func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error)                          `perm:"admin"`

 		DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"` DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"`
DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"`
DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"`
DealsSetConsiderOnlineRetrievalDeals func(context.Context, bool) error `perm:"admin"`
DealsConsiderOfflineStorageDeals func(context.Context) (bool, error) `perm:"read"`
DealsSetConsiderOfflineStorageDeals func(context.Context, bool) error `perm:"admin"`
DealsConsiderOfflineRetrievalDeals func(context.Context) (bool, error) `perm:"read"`
DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error `perm:"admin"`
DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"read"`
DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"admin"`
StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
} }
@ -236,9 +255,15 @@ type WorkerStruct struct {
SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"` SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"` SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"` SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
FinalizeSector func(context.Context, abi.SectorID) error `perm:"admin"` FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"`
ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"`
Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"`
Fetch func(context.Context, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"` UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error `perm:"admin"`
Fetch func(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error `perm:"admin"`
Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"` Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"`
} }
@ -254,6 +279,9 @@ func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]
return c.Internal.AuthNew(ctx, perms) return c.Internal.AuthNew(ctx, perms)
} }
func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) {
return c.Internal.NetPubsubScores(ctx)
}
func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return c.Internal.NetConnectedness(ctx, pid) return c.Internal.NetConnectedness(ctx, pid)
} }
@ -300,6 +328,10 @@ func (c *CommonStruct) Shutdown(ctx context.Context) error {
return c.Internal.Shutdown(ctx) return c.Internal.Shutdown(ctx)
} }
func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
return c.Internal.Closing(ctx)
}
// FullNodeStruct // FullNodeStruct
func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) { func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) {
@ -318,6 +350,10 @@ func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]ap
return c.Internal.ClientFindData(ctx, root) return c.Internal.ClientFindData(ctx, root)
} }
func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) {
return c.Internal.ClientMinerQueryOffer(ctx, root, miner)
}
func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
return c.Internal.ClientStartDeal(ctx, params) return c.Internal.ClientStartDeal(ctx, params)
} }
@ -540,7 +576,7 @@ func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address,
return c.Internal.StateMinerPower(ctx, a, tsk) return c.Internal.StateMinerPower(ctx, a, tsk)
} }
func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) {
return c.Internal.StateMinerInfo(ctx, actor, tsk) return c.Internal.StateMinerInfo(ctx, actor, tsk)
} }
@ -572,6 +608,10 @@ func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr add
return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk) return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk)
} }
func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) {
return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk)
}
func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) { func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
return c.Internal.StateCall(ctx, msg, tsk) return c.Internal.StateCall(ctx, msg, tsk)
} }
@ -584,16 +624,16 @@ func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Addres
return c.Internal.StateGetActor(ctx, actor, tsk) return c.Internal.StateGetActor(ctx, actor, tsk)
} }
func (c *FullNodeStruct) StateReadState(ctx context.Context, act *types.Actor, tsk types.TipSetKey) (*api.ActorState, error) { func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) {
return c.Internal.StateReadState(ctx, act, tsk) return c.Internal.StateReadState(ctx, addr, tsk)
} }
func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.StatePledgeCollateral(ctx, tsk) return c.Internal.StatePledgeCollateral(ctx, tsk)
} }
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) { func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
return c.Internal.StateWaitMsg(ctx, msgc) return c.Internal.StateWaitMsg(ctx, msgc, confidence)
} }
func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) { func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) {
@ -756,6 +796,10 @@ func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNum
return c.Internal.SectorsUpdate(ctx, id, state) return c.Internal.SectorsUpdate(ctx, id, state)
} }
func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error {
return c.Internal.SectorRemove(ctx, number)
}
func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error { func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
return c.Internal.WorkerConnect(ctx, url) return c.Internal.WorkerConnect(ctx, url)
} }
@ -768,15 +812,15 @@ func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.Storag
return c.Internal.StorageAttach(ctx, si, st) return c.Internal.StorageAttach(ctx, si, st)
} }
func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error { func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType, primary bool) error {
return c.Internal.StorageDeclareSector(ctx, storageId, s, ft) return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary)
} }
func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error { func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error {
return c.Internal.StorageDropSector(ctx, storageId, s, ft) return c.Internal.StorageDropSector(ctx, storageId, s, ft)
} }
func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, allowFetch bool) ([]stores.StorageInfo, error) { func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, allowFetch bool) ([]stores.SectorStorageInfo, error) {
return c.Internal.StorageFindSector(ctx, si, types, allowFetch) return c.Internal.StorageFindSector(ctx, si, types, allowFetch)
} }
@ -796,14 +840,22 @@ func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (sto
return c.Internal.StorageInfo(ctx, id) return c.Internal.StorageInfo(ctx, id)
} }
func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredProof, sealing bool) ([]stores.StorageInfo, error) { func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, pt stores.PathType) ([]stores.StorageInfo, error) {
return c.Internal.StorageBestAlloc(ctx, allocate, spt, sealing) return c.Internal.StorageBestAlloc(ctx, allocate, spt, pt)
} }
func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error { func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error {
return c.Internal.StorageReportHealth(ctx, id, report) return c.Internal.StorageReportHealth(ctx, id, report)
} }
func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error {
return c.Internal.StorageLock(ctx, sector, read, write)
}
func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) {
return c.Internal.StorageTryLock(ctx, sector, read, write)
}
func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error { func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error {
return c.Internal.MarketImportDealData(ctx, propcid, path) return c.Internal.MarketImportDealData(ctx, propcid, path)
} }
@ -816,8 +868,12 @@ func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]s
return c.Internal.MarketListIncompleteDeals(ctx) return c.Internal.MarketListIncompleteDeals(ctx)
} }
func (c *StorageMinerStruct) MarketSetPrice(ctx context.Context, p types.BigInt) error { func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
return c.Internal.MarketSetPrice(ctx, p) return c.Internal.MarketSetAsk(ctx, price, duration, minPieceSize, maxPieceSize)
}
func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
return c.Internal.MarketGetAsk(ctx)
} }
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
@ -828,6 +884,46 @@ func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]storagemarket.Sto
return c.Internal.DealsList(ctx) return c.Internal.DealsList(ctx)
} }
func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) {
return c.Internal.DealsConsiderOnlineStorageDeals(ctx)
}
func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error {
return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b)
}
func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) {
return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx)
}
func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error {
return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b)
}
func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
return c.Internal.DealsPieceCidBlocklist(ctx)
}
func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
return c.Internal.DealsSetPieceCidBlocklist(ctx, cids)
}
func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) {
return c.Internal.DealsConsiderOfflineStorageDeals(ctx)
}
func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error {
return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b)
}
func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) {
return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx)
}
func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error {
return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, b)
}
func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error { func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
return c.Internal.StorageAddLocal(ctx, path) return c.Internal.StorageAddLocal(ctx, path)
} }
@ -866,12 +962,32 @@ func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o
return w.Internal.SealCommit2(ctx, sector, c1o) return w.Internal.SealCommit2(ctx, sector, c1o)
} }
func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID) error { func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
return w.Internal.FinalizeSector(ctx, sector) return w.Internal.FinalizeSector(ctx, sector, keepUnsealed)
} }
func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool) error { func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
return w.Internal.Fetch(ctx, id, fileType, b) return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree)
}
func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
return w.Internal.Remove(ctx, sector)
}
func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error {
return w.Internal.MoveStorage(ctx, sector)
}
func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
return w.Internal.UnsealPiece(ctx, id, index, size, randomness, c)
}
func (w *WorkerStruct) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error {
return w.Internal.ReadPiece(ctx, writer, id, index, size)
}
func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
return w.Internal.Fetch(ctx, id, fileType, ptype, am)
} }
func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) { func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
@ -881,4 +997,4 @@ func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
var _ api.Common = &CommonStruct{} var _ api.Common = &CommonStruct{}
var _ api.FullNode = &FullNodeStruct{} var _ api.FullNode = &FullNodeStruct{}
var _ api.StorageMiner = &StorageMinerStruct{} var _ api.StorageMiner = &StorageMinerStruct{}
var _ api.WorkerApi = &WorkerStruct{} var _ api.WorkerAPI = &WorkerStruct{}
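MarketSetPrice is replaced by MarketSetAsk, which publishes a full signed ask (price, duration, and piece-size bounds) rather than a bare price. A hedged sketch of driving the new endpoint over JSON-RPC, assuming the client constructor shown in the next file lives in the api/client package; the price, duration, and size bounds are illustrative placeholders, not defaults from this commit.

package example

import (
	"context"
	"net/http"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types"
)

// setAsk republishes the miner's storage ask with illustrative bounds.
func setAsk(ctx context.Context, addr string, requestHeader http.Header) error {
	miner, closer, err := client.NewStorageMinerRPC(addr, requestHeader)
	if err != nil {
		return err
	}
	defer closer()

	price := types.NewInt(500_000_000)      // attoFIL per GiB per epoch, placeholder value
	duration := abi.ChainEpoch(100_000)     // illustrative ask lifetime; pick per ask policy
	minPiece := abi.PaddedPieceSize(256)
	maxPiece := abi.PaddedPieceSize(32 << 30)
	return miner.MarketSetAsk(ctx, price, duration, minPiece, maxPiece)
}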

View File

@ -48,7 +48,7 @@ func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMine
return &res, closer, err return &res, closer, err
} }
func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerApi, jsonrpc.ClientCloser, error) { func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
var res apistruct.WorkerStruct var res apistruct.WorkerStruct
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
[]interface{}{ []interface{}{

View File

@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
@ -72,10 +73,14 @@ func init() {
addExample(pid) addExample(pid)
addExample(bitfield.NewFromSet([]uint64{5})) addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredProof_StackedDRG32GiBPoSt) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
addExample(abi.ChainEpoch(10101)) addExample(abi.ChainEpoch(10101))
addExample(crypto.SigTypeBLS) addExample(crypto.SigTypeBLS)
addExample(int64(9)) addExample(int64(9))
addExample(12.3)
addExample(123)
addExample(uintptr(0))
addExample(abi.MethodNum(1)) addExample(abi.MethodNum(1))
addExample(exitcode.ExitCode(0)) addExample(exitcode.ExitCode(0))
addExample(crypto.DomainSeparationTag_ElectionProofProduction) addExample(crypto.DomainSeparationTag_ElectionProofProduction)
@ -93,18 +98,18 @@ func init() {
addExample(build.APIVersion) addExample(build.APIVersion)
addExample(api.PCHInbound) addExample(api.PCHInbound)
addExample(time.Minute) addExample(time.Minute)
addExample(&types.ExecutionResult{ addExample(&types.ExecutionTrace{
Msg: exampleValue(reflect.TypeOf(&types.Message{})).(*types.Message), Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{})).(*types.MessageReceipt), MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
}) })
addExample(map[string]types.Actor{ addExample(map[string]types.Actor{
"t01236": exampleValue(reflect.TypeOf(types.Actor{})).(types.Actor), "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
}) })
addExample(map[string]api.MarketDeal{ addExample(map[string]api.MarketDeal{
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{})).(api.MarketDeal), "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
}) })
addExample(map[string]api.MarketBalance{ addExample(map[string]api.MarketBalance{
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{})).(api.MarketBalance), "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
}) })
maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior") maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
@ -117,7 +122,7 @@ func init() {
} }
func exampleValue(t reflect.Type) interface{} { func exampleValue(t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t] v, ok := ExampleValues[t]
if ok { if ok {
return v return v
@ -126,25 +131,25 @@ func exampleValue(t reflect.Type) interface{} {
switch t.Kind() { switch t.Kind() {
case reflect.Slice: case reflect.Slice:
out := reflect.New(t).Elem() out := reflect.New(t).Elem()
reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem()))) reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
return out.Interface() return out.Interface()
case reflect.Chan: case reflect.Chan:
return exampleValue(t.Elem()) return exampleValue(t.Elem(), nil)
case reflect.Struct: case reflect.Struct:
es := exampleStruct(t) es := exampleStruct(t, parent)
v := reflect.ValueOf(es).Elem().Interface() v := reflect.ValueOf(es).Elem().Interface()
ExampleValues[t] = v ExampleValues[t] = v
return v return v
case reflect.Array: case reflect.Array:
out := reflect.New(t).Elem() out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ { for i := 0; i < t.Len(); i++ {
out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem()))) out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
} }
return out.Interface() return out.Interface()
case reflect.Ptr: case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct { if t.Elem().Kind() == reflect.Struct {
es := exampleStruct(t.Elem()) es := exampleStruct(t.Elem(), t)
//ExampleValues[t] = es //ExampleValues[t] = es
return es return es
} }
@ -155,12 +160,15 @@ func exampleValue(t reflect.Type) interface{} {
panic(fmt.Sprintf("No example value for type: %s", t)) panic(fmt.Sprintf("No example value for type: %s", t))
} }
func exampleStruct(t reflect.Type) interface{} { func exampleStruct(t, parent reflect.Type) interface{} {
ns := reflect.New(t) ns := reflect.New(t)
for i := 0; i < t.NumField(); i++ { for i := 0; i < t.NumField(); i++ {
f := t.Field(i) f := t.Field(i)
if f.Type == parent {
continue
}
if strings.Title(f.Name) == f.Name { if strings.Title(f.Name) == f.Name {
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type))) ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
} }
} }
@ -193,8 +201,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
const noComment = "There are not yet any comments for this method." const noComment = "There are not yet any comments for this method."
func parseApiASTInfo() (map[string]string, map[string]string) { func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
fset := token.NewFileSet() fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments) pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
if err != nil { if err != nil {
@ -287,17 +294,17 @@ func main() {
ft := m.Func.Type() ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ { for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j) inp := ft.In(j)
args = append(args, exampleValue(inp)) args = append(args, exampleValue(inp, nil))
} }
v, err := json.Marshal(args) v, err := json.MarshalIndent(args, "", " ")
if err != nil { if err != nil {
panic(err) panic(err)
} }
outv := exampleValue(ft.Out(0)) outv := exampleValue(ft.Out(0), nil)
ov, err := json.Marshal(outv) ov, err := json.MarshalIndent(outv, "", " ")
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -319,7 +326,20 @@ func main() {
return groupslice[i].GroupName < groupslice[j].GroupName return groupslice[i].GroupName < groupslice[j].GroupName
}) })
fmt.Printf("# Groups\n")
for _, g := range groupslice { for _, g := range groupslice {
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
for _, method := range g.Methods {
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
}
}
permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName) fmt.Printf("## %s\n", g.GroupName)
fmt.Printf("%s\n\n", g.Header) fmt.Printf("%s\n\n", g.Header)
@ -331,8 +351,29 @@ func main() {
fmt.Printf("### %s\n", m.Name) fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment) fmt.Printf("%s\n\n", m.Comment)
meth, ok := permStruct.FieldByName(m.Name)
if !ok {
meth, ok = commonPermStruct.FieldByName(m.Name)
if !ok {
panic("no perms for method: " + m.Name)
}
}
perms := meth.Tag.Get("perm")
fmt.Printf("Perms: %s\n\n", perms)
if strings.Count(m.InputExample, "\n") > 0 {
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
} else {
fmt.Printf("Inputs: `%s`\n\n", m.InputExample) fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
}
if strings.Count(m.ResponseExample, "\n") > 0 {
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
} else {
fmt.Printf("Response: `%s`\n\n", m.ResponseExample) fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
} }
} }
}
} }

View File

@ -8,6 +8,7 @@ import (
"math/rand" "math/rand"
"os" "os"
"path/filepath" "path/filepath"
"sync/atomic"
"testing" "testing"
"time" "time"
@ -35,7 +36,7 @@ func init() {
} }
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, oneMiner)
@ -52,11 +53,11 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
} }
time.Sleep(time.Second) time.Sleep(time.Second)
mine := true mine := int64(1)
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
defer close(done) defer close(done)
for mine { for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime) time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil { if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err) t.Error(err)
@ -66,13 +67,13 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
makeDeal(t, ctx, 6, client, miner, carExport) makeDeal(t, ctx, 6, client, miner, carExport)
mine = false atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining") fmt.Println("shutting down mining")
<-done <-done
} }
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, oneMiner)
@ -89,12 +90,12 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
} }
time.Sleep(time.Second) time.Sleep(time.Second)
mine := true mine := int64(1)
done := make(chan struct{}) done := make(chan struct{})
go func() { go func() {
defer close(done) defer close(done)
for mine { for atomic.LoadInt64(&mine) == 1 {
time.Sleep(blocktime) time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil { if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err) t.Error(err)
@ -105,7 +106,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
makeDeal(t, ctx, 6, client, miner, false) makeDeal(t, ctx, 6, client, miner, false)
makeDeal(t, ctx, 7, client, miner, false) makeDeal(t, ctx, 7, client, miner, false)
mine = false atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining") fmt.Println("shutting down mining")
<-done <-done
} }
@ -193,7 +194,7 @@ func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.Fu
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer os.RemoveAll(rpath) defer os.RemoveAll(rpath) //nolint:errcheck
caddr, err := client.WalletDefaultAddress(ctx) caddr, err := client.WalletDefaultAddress(ctx)
if err != nil { if err != nil {

View File

@ -82,7 +82,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
} }
func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
// test making a deal with a fresh miner, and see if it starts to mine // test making a deal with a fresh miner, and see if it starts to mine
@ -126,6 +126,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
minedTwo := make(chan struct{}) minedTwo := make(chan struct{})
go func() { go func() {
doneMinedTwo := false
defer close(done) defer close(done)
prevExpect := 0 prevExpect := 0
@ -175,9 +176,9 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
time.Sleep(blocktime) time.Sleep(blocktime)
} }
if prevExpect == 2 && expect == 2 && minedTwo != nil { if prevExpect == 2 && expect == 2 && !doneMinedTwo {
close(minedTwo) close(minedTwo)
minedTwo = nil doneMinedTwo = true
} }
prevExpect = expect prevExpect = expect

View File

@ -4,9 +4,11 @@ import (
"context" "context"
"testing" "testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/stretchr/testify/assert"
) )
type TestNode struct { type TestNode struct {
@ -21,6 +23,8 @@ type TestStorageNode struct {
var PresealGenesis = -1 var PresealGenesis = -1
const GenesisPreseals = 2
type StorageMiner struct { type StorageMiner struct {
Full int Full int
Preseal int Preseal int
@ -60,9 +64,7 @@ func (ts *testSuite) testVersion(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if v.Version != build.BuildVersion { require.Equal(t, v.Version, build.BuildVersion)
t.Error("Version didn't work properly")
}
} }
func (ts *testSuite) testID(t *testing.T) { func (ts *testSuite) testID(t *testing.T) {

169 api/test/window_post.go Normal file
View File

@ -0,0 +1,169 @@
package test
import (
"context"
"fmt"
"github.com/filecoin-project/lotus/api"
"os"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi"
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
sealing "github.com/filecoin-project/storage-fsm"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
)
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background()
n, sn := b(t, 1, oneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := true
done := make(chan struct{})
go func() {
defer close(done)
for mine {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors)
mine = false
<-done
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n int) {
for i := 0; i < n; i++ {
err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n {
break
}
time.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
for len(toCheck) > 0 {
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n)
require.NoError(t, err)
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
time.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d\n", len(s))
}
}
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background()
n, sn := b(t, 1, oneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := true
done := make(chan struct{})
go func() {
defer close(done)
for mine {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
fmt.Printf("Running one proving periods\n")
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
if head.Height() > di.PeriodStart+(miner2.WPoStProvingPeriod)+2 {
break
}
if head.Height()%100 == 0 {
fmt.Printf("@%d\n", head.Height())
}
time.Sleep(blocktime)
}
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
ssz, err := miner.ActorSectorSize(ctx, maddr)
require.NoError(t, err)
require.Equal(t, p.MinerPower, p.TotalPower)
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals)))
// TODO: Inject faults here
mine = false
<-done
}
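For a concrete sense of the final power check: under the 2KiB dev parameters elsewhere in this commit, with GenesisPreseals = 2 and, say, nSectors = 2, the expected raw-byte power is 2048 bytes × (2 + 2) = 8192 bytes, so the assertion only passes if every pledged and preseal sector was successfully proven in the window PoSt.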

View File

@ -2,11 +2,16 @@ package api
import ( import (
"encoding/json" "encoding/json"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr" ma "github.com/multiformats/go-multiaddr"
) )
// TODO: check if this exists anywhere else // TODO: check if this exists anywhere else
type MultiaddrSlice []ma.Multiaddr type MultiaddrSlice []ma.Multiaddr
func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) { func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
@ -32,3 +37,41 @@ type ObjStat struct {
Size uint64 Size uint64
Links uint64 Links uint64
} }
type PubsubScore struct {
ID peer.ID
Score float64
}
type MinerInfo struct {
Owner address.Address // Must be an ID-address.
Worker address.Address // Must be an ID-address.
NewWorker address.Address // Must be an ID-address.
WorkerChangeEpoch abi.ChainEpoch
PeerId peer.ID
Multiaddrs []abi.Multiaddrs
SealProofType abi.RegisteredSealProof
SectorSize abi.SectorSize
WindowPoStPartitionSectors uint64
}
func NewApiMinerInfo(info miner.MinerInfo) MinerInfo {
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
NewWorker: address.Undef,
WorkerChangeEpoch: -1,
PeerId: peer.ID(info.PeerId),
Multiaddrs: info.Multiaddrs,
SealProofType: info.SealProofType,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
}
if info.PendingWorkerKey != nil {
mi.NewWorker = info.PendingWorkerKey.NewWorker
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
}
return mi
}
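StateMinerInfo now returns this api.MinerInfo wrapper instead of the raw actor state; NewWorker and WorkerChangeEpoch hold sentinel values (address.Undef and -1) unless a worker-key change is pending. A small consumer sketch, assuming an api.FullNode client obtained elsewhere:

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// printWorkerChange reports whether a miner has a pending worker-key change.
func printWorkerChange(ctx context.Context, node api.FullNode, maddr address.Address) error {
	mi, err := node.StateMinerInfo(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return err
	}
	if mi.NewWorker == address.Undef {
		fmt.Printf("miner %s has no pending worker change\n", maddr)
		return nil
	}
	fmt.Printf("worker of %s changes to %s at epoch %d\n", maddr, mi.NewWorker, mi.WorkerChangeEpoch)
	return nil
}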

View File

@ -13,6 +13,10 @@ import (
) )
func BuiltinBootstrap() ([]peer.AddrInfo, error) { func BuiltinBootstrap() ([]peer.AddrInfo, error) {
if DisableBuiltinAssets {
return nil, nil
}
var out []peer.AddrInfo var out []peer.AddrInfo
b := rice.MustFindBox("bootstrap") b := rice.MustFindBox("bootstrap")
@ -34,3 +38,12 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) {
}) })
return out, err return out, err
} }
func DrandBootstrap() ([]peer.AddrInfo, error) {
addrs := []string{
"/dnsaddr/pl-eu.testnet.drand.sh/",
"/dnsaddr/pl-us.testnet.drand.sh/",
"/dnsaddr/pl-sin.testnet.drand.sh/",
}
return addrutil.ParseAddresses(context.TODO(), addrs)
}

View File

@ -1,12 +1,12 @@
/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs /dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWKNF7vNFEhnvB45E9mw2B5z6t419W3ziZPLdUDVnLLKGs /ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr /dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWECJTm7RUPyGfNbRwm6y2fK4wA7EB8rDJtWsq5AKi7iDr /ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym /dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWC7MD6m7iNCuDsYtNr7xVtazihyVUizBbhmhEiyMAm9ym /ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN /dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/ip4/86.109.15.55/tcp/1347/p2p/12D3KooWD8eYqsKcEMFax6EbWN3rjA7qFsxCez2rmN8dWqkzgNaN /ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD /dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/ip4/139.178.84.41/tcp/1347/p2p/12D3KooWLB3RR8frLAmaK4ntHC2dwrAjyGzQgyUzWxAum1FxyyqD /ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ /dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
/ip4/136.144.49.131/tcp/1347/p2p/12D3KooWGPDJAw3HW4uVU3JEQBfFaZ1kdpg4HvvwRMVpUYbzhsLQ /ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R

15 build/flags.go Normal file
View File

@ -0,0 +1,15 @@
package build
// DisableBuiltinAssets disables the resolution of go.rice boxes that store
// built-in assets, such as proof parameters, bootstrap peers, genesis blocks,
// etc.
//
// When this value is set to true, it is expected that the user will
// provide any such configurations through the Lotus API itself.
//
// This is useful when you're using Lotus as a library, such as to orchestrate
// test scenarios, or for other purposes where you don't need to use the
// defaults shipped with the binary.
//
// For this flag to be effective, it must be enabled _before_ instantiating Lotus.
var DisableBuiltinAssets = false
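A minimal sketch of how an embedding application might use this flag: flip it before constructing any Lotus node, so the bundled go.rice assets are never resolved and replacements can be supplied through the API instead. The package name here is a placeholder.

package example

import "github.com/filecoin-project/lotus/build"

func init() {
	// Must run before any Lotus node is instantiated. With built-in assets
	// disabled, bootstrap peers, genesis material, etc. are expected to be
	// supplied by the embedding application (for example a test harness)
	// through the Lotus API instead of the bundled go.rice boxes.
	build.DisableBuiltinAssets = true
}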

Binary file not shown.

View File

@ -2,6 +2,6 @@ package build
import rice "github.com/GeertJohan/go.rice" import rice "github.com/GeertJohan/go.rice"
func ParametersJson() []byte { func ParametersJSON() []byte {
return rice.MustFindBox("proof-params").MustBytes("parameters.json") return rice.MustFindBox("proof-params").MustBytes("parameters.json")
} }

View File

@ -12,16 +12,17 @@ import (
func init() { func init() {
power.ConsensusMinerMinPower = big.NewInt(2048) power.ConsensusMinerMinPower = big.NewInt(2048)
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
verifreg.MinVerifiedDealSize = big.NewInt(256) verifreg.MinVerifiedDealSize = big.NewInt(256)
BuildType |= Build2k
} }
// Seconds const BlockDelaySecs = uint64(2)
const BlockDelay = 2
const PropagationDelay = 3 const PropagationDelaySecs = uint64(3)
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after // SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
// which the miner is slashed // which the miner is slashed

View File

@ -4,6 +4,7 @@ package build
func init() { func init() {
InsecurePoStValidation = true InsecurePoStValidation = true
BuildType |= BuildDebug
} }
// NOTE: Also includes settings from params_2k // NOTE: Also includes settings from params_2k

View File

@ -0,0 +1,38 @@
package build
import (
"sort"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
func DefaultSectorSize() abi.SectorSize {
szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes))
for spt := range miner.SupportedProofTypes {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
}
szs = append(szs, ss)
}
sort.Slice(szs, func(i, j int) bool {
return szs[i] < szs[j]
})
return szs[0]
}
// Core network constants
func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) }
func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
return protocol.ID("/fil/kad/" + string(netName))
}

View File

@ -1,10 +1,9 @@
// +build !testground
package build package build
import ( import (
"math/big" "math/big"
"sort"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
@ -13,32 +12,6 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
) )
func DefaultSectorSize() abi.SectorSize {
szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes))
for spt := range miner.SupportedProofTypes {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
}
szs = append(szs, ss)
}
sort.Slice(szs, func(i, j int) bool {
return szs[i] < szs[j]
})
return szs[0]
}
// Core network constants
func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) }
func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
return protocol.ID("/fil/kad/" + string(netName))
}
// ///// // /////
// Storage // Storage
@ -48,8 +21,7 @@ const UnixfsLinksPerLevel = 1024
// ///// // /////
// Consensus / Network // Consensus / Network
// Seconds const AllowableClockDriftSecs = uint64(1)
const AllowableClockDrift = 1
// Epochs // Epochs
const ForkLengthThreshold = Finality const ForkLengthThreshold = Finality
@ -59,11 +31,12 @@ var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
// Epochs // Epochs
const Finality = miner.ChainFinalityish const Finality = miner.ChainFinalityish
const MessageConfidence = uint64(5)
// constants for Weight calculation // constants for Weight calculation
// The ratio of weight contributed by short-term vs long-term factors in a given round // The ratio of weight contributed by short-term vs long-term factors in a given round
const WRatioNum = int64(1) const WRatioNum = int64(1)
const WRatioDen = 2 const WRatioDen = uint64(2)
// ///// // /////
// Proofs // Proofs
@ -81,25 +54,25 @@ const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from spe
// Mining // Mining
// Epochs // Epochs
const TicketRandomnessLookback = 1 const TicketRandomnessLookback = abi.ChainEpoch(1)
const WinningPoStSectorSetLookback = 10 const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
// ///// // /////
// Devnet settings // Devnet settings
const TotalFilecoin = 2_000_000_000 const TotalFilecoin = uint64(2_000_000_000)
const MiningRewardTotal = 1_400_000_000 const MiningRewardTotal = uint64(1_400_000_000)
const FilecoinPrecision = 1_000_000_000_000_000_000 const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
var InitialRewardBalance *big.Int var InitialRewardBalance *big.Int
// TODO: Move other important consts here // TODO: Move other important consts here
func init() { func init() {
InitialRewardBalance = big.NewInt(MiningRewardTotal) InitialRewardBalance = big.NewInt(int64(MiningRewardTotal))
InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(FilecoinPrecision)) InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision)))
} }
// Sync // Sync
@ -118,11 +91,13 @@ const VerifSigCacheSize = 32000
// TODO: If this is gonna stay, it should move to specs-actors // TODO: If this is gonna stay, it should move to specs-actors
const BlockMessageLimit = 512 const BlockMessageLimit = 512
const BlockGasLimit = 100_000_000 const BlockGasLimit = 100_000_000_000
var DrandCoeffs = []string{ var DrandConfig = dtypes.DrandConfig{
"82c279cce744450e68de98ee08f9698a01dd38f8e3be3c53f2b840fb9d09ad62a0b6b87981e179e1b14bc9a2d284c985", Servers: []string{
"82d51308ad346c686f81b8094551597d7b963295cbf313401a93df9baf52d5ae98a87745bee70839a4d6e65c342bd15b", "https://pl-eu.testnet.drand.sh",
"94eebfd53f4ba6a3b8304236400a12e73885e5a781509a5c8d41d2e8b476923d8ea6052649b3c17282f596217f96c5de", "https://pl-us.testnet.drand.sh",
"8dc4231e42b4edf39e86ef1579401692480647918275da767d3e558c520d6375ad953530610fd27daf110187877a65d0", "https://pl-sin.testnet.drand.sh",
},
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`,
} }

View File

@ -0,0 +1,73 @@
// +build testground
// This file makes hardcoded parameters (const) configurable as vars.
//
// Its purpose is to unlock various degrees of flexibility and parametrization
// when writing Testground plans for Lotus.
//
package build
import (
"math/big"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)
var (
UnixfsChunkSize = uint64(1 << 20)
UnixfsLinksPerLevel = 1024
BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
BlockMessageLimit = 512
BlockGasLimit = int64(100_000_000_000)
BlockDelaySecs = uint64(builtin.EpochDurationSeconds)
PropagationDelaySecs = uint64(6)
AllowableClockDriftSecs = uint64(1)
Finality = miner.ChainFinalityish
ForkLengthThreshold = Finality
SlashablePowerDelay = 20
InteractivePoRepConfidence = 6
MessageConfidence uint64 = 5
WRatioNum = int64(1)
WRatioDen = uint64(2)
BadBlockCacheSize = 1 << 15
BlsSignatureCacheSize = 40000
VerifSigCacheSize = 32000
SealRandomnessLookback = Finality
SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
MaxSealLookback = SealRandomnessLookbackLimit + 2000
TicketRandomnessLookback = abi.ChainEpoch(1)
WinningPoStSectorSetLookback = abi.ChainEpoch(10)
TotalFilecoin uint64 = 2_000_000_000
MiningRewardTotal uint64 = 1_400_000_000
FilecoinPrecision uint64 = 1_000_000_000_000_000_000
InitialRewardBalance = func() *big.Int {
v := big.NewInt(int64(MiningRewardTotal))
v = v.Mul(v, big.NewInt(int64(FilecoinPrecision)))
return v
}()
DrandConfig = dtypes.DrandConfig{
Servers: []string{
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
"https://pl-sin.testnet.drand.sh",
},
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`,
}
)
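Because these parameters become package-level vars under the testground build tag, a test plan compiled with `go build -tags testground` can retune the network before starting any node. A minimal sketch; the package name and the specific values are illustrative, not defaults from this commit.

// +build testground

package plan

import "github.com/filecoin-project/lotus/build"

func init() {
	// These assignments only compile when the node is built with the
	// `testground` tag, where the parameters above are vars rather than consts.
	build.BlockDelaySecs = 4       // shorter epochs for quick test runs (illustrative)
	build.PropagationDelaySecs = 1 // assume a low-latency test network
	build.MessageConfidence = 2    // fewer confirmation epochs in tests
}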

View File

@ -1,5 +1,6 @@
// +build !debug // +build !debug
// +build !2k // +build !2k
// +build !testground
package build package build
@ -13,13 +14,12 @@ import (
func init() { func init() {
power.ConsensusMinerMinPower = big.NewInt(1024 << 30) power.ConsensusMinerMinPower = big.NewInt(1024 << 30)
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG32GiBSeal: {}, abi.RegisteredSealProof_StackedDrg32GiBV1: {},
abi.RegisteredProof_StackedDRG64GiBSeal: {}, abi.RegisteredSealProof_StackedDrg64GiBV1: {},
} }
} }
// Seconds const BlockDelaySecs = uint64(builtin.EpochDurationSeconds)
const BlockDelay = builtin.EpochDurationSeconds
const PropagationDelay = 6 const PropagationDelaySecs = uint64(6)

View File

@ -1,152 +1,152 @@
{ {
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
"cid": "QmYkygifkXnrnsN4MJsjBFHTQJHx294CyikDgDK8nYxdGh", "cid": "QmeDRyxek34F1H6xJY6AkFdWvPsy5F6dKTrebV3ZtWT4ky",
"digest": "df3f30442a6d6b4192f5071fb17e820c", "digest": "f5827f2d8801c62c831e0f972f6dc8bb",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": {
"cid": "QmdXyqbmy2bkJA9Kyhh6z25GrTCq48LwX6c1mxPsm54wi7", "cid": "QmUw1ZmG4BBbX19MsbH3zAEGKUc42iFJc5ZAyomDHeJTsA",
"digest": "0bea3951abf9557a3569f68e52a30c6c", "digest": "398fecdb4b2de445125852bc3c080b35",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": {
"cid": "Qmf5XZZtP5VcYTf65MbKjLVabcS6cYMbr2rFShmfJzh5e5", "cid": "QmUeNKp9YZpiAFm81RV5KuxH1FDGJx2DuwcbU2XNSZLLSv",
"digest": "655e6277638edc8c658094f6f0b33d54", "digest": "2b6d2972ac9e862e8134d98fb695b0c5",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": {
"cid": "QmPuhdWnAXBks43emnkqi9FQzyU1gASKyz23zrD27BPGs8", "cid": "QmQaQmTXX995Akd66ggtJY5bNx6Gkxk8P34JTdMMq8393G",
"digest": "57690e3a6a94c3f704802a674b34f36b", "digest": "3688c9eb256b7b17f411dad78d5ef74a",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": {
"cid": "QmPNVgTN7N5vDtD5u7ERMTLcvUtrKRBfYVUDr6uW3pKhX7", "cid": "QmfEYTMSkwGJTumQx26iKXGNKiYh3mmAC4SkdybZpJCj5p",
"digest": "3d390654f58e603b896ac70c653f5676", "digest": "09bff16aed893349d94485cfae366a9c",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": {
"cid": "Qmbj61Zez7v5xA7nSCnmWbyLYznWJDWeusz7Yg8EcgVdoN", "cid": "QmP4ThPieSUJyRanjibWpT5R5cCMzMAU4j8Y7kBn7CSW1Q",
"digest": "8c170a164743c39576a7f47a1b51e6f3", "digest": "142f2f7e8f1b1779290315cabfd2c803",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": {
"cid": "QmRApb8RZoBK3cqicT7V3ydXg8yVvqPFMPrQNXP33aBihp", "cid": "QmcAixrHsz29DgvtZiMc2kQjvPRvWxYUp36QYmRDZbmREm",
"digest": "b1b58ff9a297b82885e8a7dfb035f83c", "digest": "8f987f64d434365562180b96ec12e299",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": {
"cid": "QmcytF1dTdqMFoyXi931j1RgmGtLfR9LLLaBznRt1tPQyD", "cid": "QmT4iFnbL6r4txS5PXsiV7NTzbhCxHy54PvdkJJGV2VFXb",
"digest": "1a09e00c641f192f55af3433a028f050", "digest": "94b6c24ac01924f4feeecedd16b5d77d",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": {
"cid": "QmPvr54tWaVeP4WnekivzUAJitTqsQfvikBvAHNEaDNQSw", "cid": "QmbjFst6SFCK1KsTQrfwPdxf3VTNa1raed574tEZZ9PoyQ",
"digest": "9380e41368ed4083dbc922b290d3b786", "digest": "2c245fe8179839dd6c6cdea207c67ae8",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": {
"cid": "QmXyVLVDRCcxA9SjT7PeK8HFtyxZ2ZH3SHa8KoGLw8VGJt", "cid": "QmQJKmvZN1a5cQ1Nw6CDyXs3nuRPzvyU5NvCFMUL2BfcZC",
"digest": "f0731a7e20f90704bd38fc5d27882f6d", "digest": "56ae47bfda53bb8d22981ed8d8d27d72",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": {
"cid": "Qmf5f6ko3dqj7qauzXpZqxM9B2x2sL977K6gE2ppNwuJPv", "cid": "QmQCABxeTpdvXTyjDyk7nPBxkQzCh7MXfGztWnSXEPKMLW",
"digest": "273ebb8c896326b7c292bee8b775fd38", "digest": "7e6b2eb5ecbb11ac651ad66ebbb2075a",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": {
"cid": "QmfP3MQe8koW63n5MkDENENVHxib78MJYYyZvbneCsuze8", "cid": "QmPBweyugh5Sx4umk8ULhgEGbjY8xmWLfU6M7EMpc8Mad6",
"digest": "3dd94da9da64e51b3445bc528d84e76d", "digest": "94a8d9e25a9ab9674d339833664eba25",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": {
"cid": "QmYEeeCE8uT2bsVkxcqqUYeMmMEbe6rfmo8wQCv7jFHqqm", "cid": "QmY5yax1E9KymBnCeHksE9Zi8NieZbmwcpoDGoabkeeb9h",
"digest": "c947f2021304ed43b7216f7a8436e294", "digest": "c909ea9e3fe25ab9b391a64593afdbba",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": {
"cid": "QmXB63ExriFjB4ywWnXTnFwCcLFfCeEP3h15qtL5i7F4aX", "cid": "QmXnPo4yH5mwMguwrvqgRfduSttbmPrXtbBfbwU21wQWHt",
"digest": "ab20d7b253e7e9a0d2ccdf7599ec8ec3", "digest": "caf900461e988bbf86dbcaca087b7864",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": {
"cid": "QmW5Yxg3L1NSzuQVcRMHMbG3uvVoi4dTLzVaDpnEUPQpnA", "cid": "QmZtzzPWwmZEgR7MSMvXRbt9KVK8k4XZ5RLWHybHJW9SdE",
"digest": "079ba19645828ae42b22b0e3f4866e8d", "digest": "a2844f0703f186d143a06146a04577d8",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": {
"cid": "QmQzZ5dJ11tcSBees38WX41tZLXS9BqpEti253m5QcnTNs", "cid": "QmWxEA7EdQCUJTzjNpxg5XTF45D2uVyYnN1QRUb5TRYU8M",
"digest": "c76125a50a7de315165de359b5174ae4", "digest": "2306247a1e616dbe07f01b88196c2044",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": {
"cid": "QmNk3wga1tS53FUu1QnkK8ehWA2cqpCnSEAPv3KLxdJxNa", "cid": "QmP676KwuvyF9Y64uJnXvLtvD1xcuWQ6wD23RzYtQ6dd4f",
"digest": "421e4790c0b80e0107a7ff67acf14084", "digest": "215b1c667a4f46a1d0178338df568615",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": {
"cid": "QmVQCHGsrUtbn9RjHs1e6GXfeXDW5m9w4ge48PSX3Z2as2", "cid": "QmPvPwbJtcSGyqB1rQJhSF5yvFbX9ZBSsHVej5F8JUyHUJ",
"digest": "8b60e9cc1470a6729c687d6cf0a1f79c", "digest": "0c9c423b28b1455fcbc329a1045fd4dd",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": {
"cid": "QmTL3VvydaMFWKvE5VzxjgKsJYgL9JMM4JVYNtQxdj9JK1", "cid": "QmUxPQfvckzm1t6MFRdDZ1fDK5UJzAjK7pTZ97cwyachdr",
"digest": "2685f31124b22ea6b2857e5a5e87ffa3", "digest": "965132f51ae445b0e6d32692b7561995",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v26-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": {
"cid": "QmSVWbLqQYbUbbJyfsRMzEib2rfSqMtnPks1Nw22omcBQm", "cid": "QmTxq2EBnQWb5R8tS4MHdchj4vNfLYGoSXxwJFvs5xgW4K",
"digest": "efe703cd2839597c7ca5c2a906b74296", "digest": "fc8c3d26e0e56373ad96cb41520d55a6",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": {
"cid": "QmU9dH31nZZUJnsogR4Ld4ySUcH6wm2RgmGiujwnqtbU6k", "cid": "QmRjgZHERgqGoRagR788Kh6ybi26csVYa8mqbqhmZm57Jx",
"digest": "fcef8e87ae2afd7a28aae44347b804cf", "digest": "cfc7b0897d1eee48c586f7beb89e67f7",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": {
"cid": "QmdJ15DMGPooye5NaPcRfXUdHUDibcN7hKjbmTGuu1K4AQ", "cid": "QmNjvnvFP7KgovHUddULoB19fBHT81iz7NcUbzEHZUUPsm",
"digest": "2ee2b3518229680db15161d4f582af37", "digest": "fb59bd061c987eac7068008c44de346b",
"sector_size": 2048 "sector_size": 2048
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": {
"cid": "QmZgtxcY3tMXXQxZTA7ZTUDXLVUnfxNcerXgeW4gG2NnfP", "cid": "QmTpRPBA4dt8fgGpcVzi4L1KA1U2eBHCE8WVmS2GUygMvT",
"digest": "3273c7135cb75684248b475781b738ee", "digest": "36d465915b0afbf96bd08e7915e00952",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": {
"cid": "QmSS6ZkAV2aGZcgKgdPpEEgihXF1ryZX8PSAZDWSoeL1d4", "cid": "QmRzDyVfQCLsxspoVsed5bcQRsG6KiktngJfcNBL3TJPZe",
"digest": "1519b5f61d9044a59f2bdc57537c094b", "digest": "99d16df0eb6a7e227a4f4570c4f6b6f1",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": {
"cid": "QmQBGXeiNn6hVwbR6qFarQqiNGDdKk4h9ucfyvcXyfYz2N", "cid": "QmV8ZjTSGzDUWmFvsq9NSyPBR7eDDUcvCPNgj2yE7HMAFu",
"digest": "7d5f896f435c38e93bcda6dd168d860b", "digest": "34f3ddf1d1c9f41c0cd73b91e8b4bc27",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": {
"cid": "QmPrZgBVGMckEAeu5eSJnLmiAwcPQjKjZe5ir6VaQ5AxKs", "cid": "QmTa3VbjTiqJWU6r4WKayaQrUaaBsrpp5UDqYvPDd2C5hs",
"digest": "fe6d2de44580a0db5a4934688899b92f", "digest": "ec62d59651daa5631d3d1e9c782dd940",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": {
"cid": "QmZL2cq45XJn5BFzagAZwgFmLrcM1W6CXoiEF9C5j5tjEF", "cid": "Qmf8ngfArxrv9tFWDqBcNegdBMymvuakwyHKd1pbW3pbsb",
"digest": "acdfed9f0512bc85a01a9fb871d475d5", "digest": "a16d6f4c6424fb280236739f84b24f97",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": {
"cid": "QmQ4zB7nNa1tDYNifBkExRnZtwtxZw775iaqvVsZyRi6Q2", "cid": "QmfQgVFerArJ6Jupwyc9tKjLD9n1J9ajLHBdpY465tRM7M",
"digest": "524a2f3e9d6826593caebc41bb545c40", "digest": "7a139d82b8a02e35279d657e197f5c1f",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": {
"cid": "QmY7DitNKXFeLQt9QoVQkfjM1EvRnprqUVxjmkTXkHDNka", "cid": "QmfDha8271nXJn14Aq3qQeghjMBWbs6HNSGa6VuzCVk4TW",
"digest": "f27271c0537ba65ade2ec045f8fbd069", "digest": "5d3cd3f107a3bea8a96d1189efd2965c",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v26-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": {
"cid": "QmUJsvoCuQ4LszPmeRVAkMYb5qY95ctz3UXKhu8xLzyFKo", "cid": "QmRVtTtiFzHJTHurYzaCvetGAchux9cktixT4aGHthN6Zt",
"digest": "576b292938c6c9d0a0e721bd867a543b", "digest": "62c366405404e60f171e661492740b1c",
"sector_size": 68719476736 "sector_size": 68719476736
} }
} }

View File

@ -3,11 +3,33 @@ package build
import "fmt" import "fmt"
var CurrentCommit string var CurrentCommit string
var BuildType int
const (
BuildDefault = 0
Build2k = 0x1
BuildDebug = 0x3
)
func buildType() string {
switch BuildType {
case BuildDefault:
return ""
case BuildDebug:
return "+debug"
case Build2k:
return "+2k"
default:
return "+huh?"
}
}
// BuildVersion is the local build version, set by build system // BuildVersion is the local build version, set by build system
const BuildVersion = "0.3.0" const BuildVersion = "0.4.1"
var UserVersion = BuildVersion + CurrentCommit func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
}
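For instance, assuming the build system injects CurrentCommit as something like "+git.abc1234" (that suffix format is an assumption about the ldflags, not shown in this diff), a debug build (BuildType = BuildDebug) would report UserVersion() as "0.4.1+debug+git.abc1234", while a default build would report "0.4.1+git.abc1234".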
type Version uint32 type Version uint32
@ -31,8 +53,9 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
} }
// APIVersion is a semver version of the rpc api exposed // APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 3, 0) var APIVersion Version = newVer(0, 5, 0)
//nolint:varcheck,deadcode
const ( const (
majorMask = 0xff0000 majorMask = 0xff0000
minorMask = 0xffff00 minorMask = 0xffff00
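A hedged sketch of the packing these two masks imply (the real newVer sits just above this hunk and is not shown; the one-byte-per-component layout is an inference from the mask values):

// Hedged sketch: with majorMask = 0xff0000 and minorMask = 0xffff00, a Version
// packs one byte each of major, minor and patch, so newVer(0, 5, 0) == 0x000500.
func packVersion(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

// EqMajorMinor-style comparison: masking with minorMask drops the patch byte.
func sameMajorMinor(a, b Version) bool {
	return a&minorMask == b&minorMask
}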

View File

@ -50,6 +50,7 @@ func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorEr
} }
// todo: bit hacky // todo: bit hacky
func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
if retCode == 0 { if retCode == 0 {
return &actorError{ return &actorError{

View File

@ -17,6 +17,10 @@ type Response struct {
Err error Err error
} }
// RandomBeacon represents a system that provides randomness to Lotus.
// Other components interrogate the RandomBeacon to acquire randomness that's
// valid for a specific chain epoch. Also to verify beacon entries that have
// been posted on chain.
type RandomBeacon interface { type RandomBeacon interface {
Entry(context.Context, uint64) <-chan Response Entry(context.Context, uint64) <-chan Response
VerifyEntry(types.BeaconEntry, types.BeaconEntry) error VerifyEntry(types.BeaconEntry, types.BeaconEntry) error
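As a hedged usage sketch (not from this commit), a caller would typically select on the channel returned by Entry so it can honour context cancellation; Response is assumed to carry the Entry and Err fields used elsewhere in this diff:

// Hedged sketch: fetch a single beacon entry, respecting ctx cancellation.
// Assumes the chain/beacon and chain/types packages from this repo are imported.
func fetchEntry(ctx context.Context, rb beacon.RandomBeacon, round uint64) (types.BeaconEntry, error) {
	select {
	case resp := <-rb.Entry(ctx, round):
		if resp.Err != nil {
			return types.BeaconEntry{}, resp.Err
		}
		return resp.Entry, nil
	case <-ctx.Done():
		return types.BeaconEntry{}, ctx.Err()
	}
}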

View File

@ -1,48 +1,33 @@
package drand package drand
import ( import (
"bytes"
"context" "context"
"math/rand"
"sync" "sync"
"time" "time"
"github.com/filecoin-project/lotus/build" dchain "github.com/drand/drand/chain"
"github.com/filecoin-project/lotus/chain/beacon" dclient "github.com/drand/drand/client"
"github.com/filecoin-project/lotus/chain/types" hclient "github.com/drand/drand/client/http"
"github.com/filecoin-project/specs-actors/actors/abi" dlog "github.com/drand/drand/log"
gclient "github.com/drand/drand/lp2p/client"
"github.com/drand/kyber"
kzap "github.com/go-kit/kit/log/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/xerrors" "golang.org/x/xerrors"
logging "github.com/ipfs/go-log" logging "github.com/ipfs/go-log"
pubsub "github.com/libp2p/go-libp2p-pubsub"
dbeacon "github.com/drand/drand/beacon" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/drand/drand/core"
dkey "github.com/drand/drand/key" "github.com/filecoin-project/lotus/chain/beacon"
dnet "github.com/drand/drand/net" "github.com/filecoin-project/lotus/chain/types"
dproto "github.com/drand/drand/protobuf/drand" "github.com/filecoin-project/lotus/node/modules/dtypes"
) )
var log = logging.Logger("drand") var log = logging.Logger("drand")
var drandServers = []string{
"nicolas.drand.fil-test.net:443",
"philipp.drand.fil-test.net:443",
"mathilde.drand.fil-test.net:443",
"ludovic.drand.fil-test.net:443",
"gabbi.drand.fil-test.net:443",
"linus.drand.fil-test.net:443",
"jeff.drand.fil-test.net:443",
}
var drandPubKey *dkey.DistPublic
func init() {
drandPubKey = new(dkey.DistPublic)
err := drandPubKey.FromTOML(&dkey.DistPublicTOML{Coefficients: build.DrandCoeffs})
if err != nil {
panic(err)
}
}
type drandPeer struct { type drandPeer struct {
addr string addr string
tls bool tls bool
@ -56,14 +41,17 @@ func (dp *drandPeer) IsTLS() bool {
return dp.tls return dp.tls
} }
// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
// We connect to drand peers via their public HTTP endpoints. The peers are
// enumerated in the drandServers variable.
//
// The root trust for the Drand chain is configured from build.DrandChain.
type DrandBeacon struct { type DrandBeacon struct {
client dnet.Client client dclient.Client
peers []dnet.Peer pubkey kyber.Point
peersIndex int
peersIndexMtx sync.Mutex
pubkey *dkey.DistPublic
// seconds // seconds
interval time.Duration interval time.Duration
@ -76,120 +64,91 @@ type DrandBeacon struct {
localCache map[uint64]types.BeaconEntry localCache map[uint64]types.BeaconEntry
} }
func NewDrandBeacon(genesisTs, interval uint64) (*DrandBeacon, error) { func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) {
if genesisTs == 0 { if genesisTs == 0 {
panic("what are you doing this cant be zero") panic("what are you doing this cant be zero")
} }
drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON)))
if err != nil {
return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err)
}
dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger(
log.SugaredLogger.Desugar(), zapcore.InfoLevel))
var clients []dclient.Client
for _, url := range config.Servers {
hc, err := hclient.NewWithInfo(url, drandChain, nil)
if err != nil {
return nil, xerrors.Errorf("could not create http drand client: %w", err)
}
clients = append(clients, hc)
}
opts := []dclient.Option{
dclient.WithChainInfo(drandChain),
dclient.WithCacheSize(1024),
dclient.WithLogger(dlogger),
dclient.WithAutoWatch(),
}
if ps != nil {
opts = append(opts, gclient.WithPubsub(ps))
} else {
log.Info("drand beacon without pubsub")
}
client, err := dclient.Wrap(clients, opts...)
if err != nil {
return nil, xerrors.Errorf("creating drand client: %w", err)
}
db := &DrandBeacon{ db := &DrandBeacon{
client: dnet.NewGrpcClient(), client: client,
localCache: make(map[uint64]types.BeaconEntry), localCache: make(map[uint64]types.BeaconEntry),
} }
for _, ds := range drandServers {
db.peers = append(db.peers, &drandPeer{addr: ds, tls: true})
}
db.peersIndex = rand.Intn(len(db.peers)) db.pubkey = drandChain.PublicKey
db.interval = drandChain.Period
groupResp, err := db.client.Group(context.TODO(), db.peers[db.peersIndex], &dproto.GroupRequest{}) db.drandGenTime = uint64(drandChain.GenesisTime)
if err != nil {
return nil, xerrors.Errorf("failed to get group response from beacon peer: %w", err)
}
kgroup, err := core.ProtoToGroup(groupResp)
if err != nil {
return nil, xerrors.Errorf("failed to parse group response: %w", err)
}
// TODO: verify these values are what we expect them to be
if !kgroup.PublicKey.Equal(drandPubKey) {
return nil, xerrors.Errorf("public key does not match")
}
// fmt.Printf("Drand Pubkey:\n%#v\n", kgroup.PublicKey.TOML()) // use to print public key
db.pubkey = drandPubKey
db.interval = kgroup.Period
db.drandGenTime = uint64(kgroup.GenesisTime)
db.filRoundTime = interval db.filRoundTime = interval
db.filGenTime = genesisTs db.filGenTime = genesisTs
// TODO: the stream currently gives you back *all* values since drand genesis.
// Having the stream in the background is merely an optimization, so not a big deal to disable it for now
// go db.handleStreamingUpdates()
return db, nil return db, nil
} }
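A hedged construction sketch for the new signature (the server URL and the trimmed ChainInfoJSON are illustrative placeholders, not real values; passing a nil pubsub falls back to plain HTTP polling, as the log line above notes):

// Hedged sketch: build a DrandBeacon from a DrandConfig.
func newBeaconFromConfig(genesisTs, blockDelay uint64) (*DrandBeacon, error) {
	cfg := dtypes.DrandConfig{
		Servers:       []string{"https://drand.example.org"}, // hypothetical endpoint
		ChainInfoJSON: `{"public_key":"...","period":30}`,    // trimmed placeholder
	}
	// nil *pubsub.PubSub => no gossipsub subscription, HTTP polling only.
	return NewDrandBeacon(genesisTs, blockDelay, nil, cfg)
}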
func (db *DrandBeacon) rotatePeersIndex() {
db.peersIndexMtx.Lock()
nval := rand.Intn(len(db.peers))
db.peersIndex = nval
db.peersIndexMtx.Unlock()
log.Warnf("rotated to drand peer %d, %q", nval, db.peers[nval].Address())
}
func (db *DrandBeacon) getPeerIndex() int {
db.peersIndexMtx.Lock()
defer db.peersIndexMtx.Unlock()
return db.peersIndex
}
func (db *DrandBeacon) handleStreamingUpdates() {
for {
p := db.peers[db.getPeerIndex()]
ch, err := db.client.PublicRandStream(context.Background(), p, &dproto.PublicRandRequest{})
if err != nil {
log.Warnf("failed to get public rand stream to peer %q: %s", p.Address(), err)
log.Warnf("trying again in 10 seconds")
db.rotatePeersIndex()
time.Sleep(time.Second * 10)
continue
}
for e := range ch {
db.cacheValue(types.BeaconEntry{
Round: e.Round,
Data: e.Signature,
})
}
log.Warnf("drand beacon stream to peer %q broke, reconnecting in 10 seconds", p.Address())
db.rotatePeersIndex()
time.Sleep(time.Second * 10)
}
}
func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response { func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response {
// check cache, it it if there, otherwise query the endpoint
cres := db.getCachedValue(round)
if cres != nil {
out := make(chan beacon.Response, 1) out := make(chan beacon.Response, 1)
out <- beacon.Response{Entry: *cres} if round != 0 {
be := db.getCachedValue(round)
if be != nil {
out <- beacon.Response{Entry: *be}
close(out) close(out)
return out return out
} }
}
out := make(chan beacon.Response, 1)
go func() { go func() {
p := db.peers[db.getPeerIndex()] start := time.Now()
resp, err := db.client.PublicRand(ctx, p, &dproto.PublicRandRequest{Round: round}) log.Infow("start fetching randomness", "round", round)
resp, err := db.client.Get(ctx, round)
var br beacon.Response var br beacon.Response
if err != nil { if err != nil {
db.rotatePeersIndex() br.Err = xerrors.Errorf("drand failed Get request: %w", err)
br.Err = xerrors.Errorf("drand peer %q failed publicRand request: %w", p.Address(), err)
} else { } else {
br.Entry.Round = resp.GetRound() br.Entry.Round = resp.Round()
br.Entry.Data = resp.GetSignature() br.Entry.Data = resp.Signature()
} }
log.Infow("done fetching randomness", "round", round, "took", time.Since(start))
out <- br out <- br
close(out) close(out)
}() }()
return out return out
} }
func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { func (db *DrandBeacon) cacheValue(e types.BeaconEntry) {
db.cacheLk.Lock() db.cacheLk.Lock()
defer db.cacheLk.Unlock() defer db.cacheLk.Unlock()
@ -211,13 +170,12 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
// TODO handle genesis better // TODO handle genesis better
return nil return nil
} }
b := &dbeacon.Beacon{ b := &dchain.Beacon{
PreviousSig: prev.Data, PreviousSig: prev.Data,
Round: curr.Round, Round: curr.Round,
Signature: curr.Data, Signature: curr.Data,
} }
//log.Warnw("VerifyEntry", "beacon", b) err := dchain.VerifyBeacon(db.pubkey, b)
err := dbeacon.VerifyBeacon(db.pubkey.Key(), b)
if err == nil { if err == nil {
db.cacheValue(curr) db.cacheValue(curr)
} }
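As a hedged usage sketch, entries read off the chain can be checked pairwise with VerifyEntry, each entry against the one from the previous round:

// Hedged sketch: verify a slice of consecutive on-chain beacon entries.
// Assumes entries[i-1] is the round immediately preceding entries[i].
func verifyBeaconChain(db *DrandBeacon, entries []types.BeaconEntry) error {
	for i := 1; i < len(entries); i++ {
		if err := db.VerifyEntry(entries[i], entries[i-1]); err != nil {
			return xerrors.Errorf("beacon entry at round %d failed verification: %w", entries[i].Round, err)
		}
	}
	return nil
}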

View File

@ -1,14 +1,25 @@
package drand package drand
import ( import (
"fmt" "os"
"testing" "testing"
dchain "github.com/drand/drand/chain"
hclient "github.com/drand/drand/client/http"
"github.com/stretchr/testify/assert"
"github.com/filecoin-project/lotus/build"
) )
func TestPrintDrandPubkey(t *testing.T) { func TestPrintGroupInfo(t *testing.T) {
bc, err := NewDrandBeacon(1, 1) server := build.DrandConfig.Servers[0]
if err != nil { c, err := hclient.New(server, nil, nil)
t.Fatal(err) assert.NoError(t, err)
} cg := c.(interface {
fmt.Printf("Drand Pubkey:\n%#v\n", bc.pubkey.TOML()) FetchChainInfo(groupHash []byte) (*dchain.Info, error)
})
chain, err := cg.FetchChainInfo(nil)
assert.NoError(t, err)
err = chain.ToJSON(os.Stdout)
assert.NoError(t, err)
} }

View File

@ -10,6 +10,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util" cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -27,6 +28,24 @@ const BlockSyncProtocolID = "/fil/sync/blk/0.0.1"
const BlockSyncMaxRequestLength = 800 const BlockSyncMaxRequestLength = 800
// BlockSyncService is the component that services BlockSync requests from
// peers.
//
// BlockSync is the basic chain synchronization protocol of Filecoin. BlockSync
// is an RPC-oriented protocol, with a single operation to request blocks.
//
// A request contains a start anchor block (referred to with a CID), and the
// number of blocks requested beyond the anchor (including the anchor itself).
//
// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports
// two options at the moment:
//
// - include block contents
// - include block messages
//
// The response will include a status code, an optional message, and the
// response payload in case of success. The payload is a slice of serialized
// tipsets.
type BlockSyncService struct { type BlockSyncService struct {
cs *store.ChainStore cs *store.ChainStore
} }
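A hedged sketch of what a client-side request with both option bits set might look like; the BlockSyncRequest field names and the BSOptMessages constant are assumptions mirroring the BSOptBlocks usage later in this diff:

// Hedged sketch: request 10 tipsets starting from tsk, asking for both block
// contents and block messages (two option bits OR'd into the bitfield).
func newFullRequest(tsk types.TipSetKey) *BlockSyncRequest {
	return &BlockSyncRequest{
		Start:         tsk.Cids(), // anchor tipset, referenced by its block CIDs
		RequestLength: 10,         // tipsets requested, anchor included
		Options:       BSOptBlocks | BSOptMessages,
	}
}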
@ -91,7 +110,7 @@ func (bss *BlockSyncService) HandleStream(s inet.Stream) {
ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream") ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream")
defer span.End() defer span.End()
defer s.Close() defer s.Close() //nolint:errcheck
var req BlockSyncRequest var req BlockSyncRequest
if err := cborutil.ReadCborRPC(bufio.NewReader(s), &req); err != nil { if err := cborutil.ReadCborRPC(bufio.NewReader(s), &req); err != nil {
@ -107,7 +126,7 @@ func (bss *BlockSyncService) HandleStream(s inet.Stream) {
} }
writeDeadline := 60 * time.Second writeDeadline := 60 * time.Second
s.SetDeadline(time.Now().Add(writeDeadline)) _ = s.SetDeadline(time.Now().Add(writeDeadline))
if err := cborutil.WriteCborRPC(s, resp); err != nil { if err := cborutil.WriteCborRPC(s, resp); err != nil {
log.Warnw("failed to write back response for handle stream", "err", err, "peer", s.Conn().RemotePeer()) log.Warnw("failed to write back response for handle stream", "err", err, "peer", s.Conn().RemotePeer())
return return

View File

@ -64,6 +64,11 @@ func (bs *BlockSync) processStatus(req *BlockSyncRequest, res *BlockSyncResponse
} }
} }
// GetBlocks fetches count blocks from the network, from the provided tipset
// *backwards*, returning as many tipsets as count.
//
// {hint/usage}: This is used by the Syncer during normal chain syncing and when
// resolving forks.
func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks")
defer span.End() defer span.End()
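A hedged usage sketch of the call described above (head is assumed to come from an existing chain-head accessor):

// Hedged sketch: fetch the count most recent tipsets ending at head.
func fetchRecentTipsets(ctx context.Context, bs *BlockSync, head *types.TipSet, count int) ([]*types.TipSet, error) {
	tipsets, err := bs.GetBlocks(ctx, head.Key(), count)
	if err != nil {
		return nil, xerrors.Errorf("blocksync GetBlocks failed: %w", err)
	}
	// tipsets[0] is expected to correspond to head; later entries walk backwards toward genesis.
	return tipsets, nil
}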
@ -80,7 +85,9 @@ func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count i
Options: BSOptBlocks, Options: BSOptBlocks,
} }
// this peerset is sorted by latency and failure counting.
peers := bs.getPeers() peers := bs.getPeers()
// randomize the first few peers so we don't always pick the same peer // randomize the first few peers so we don't always pick the same peer
shufflePrefix(peers) shufflePrefix(peers)
@ -283,14 +290,14 @@ func (bs *BlockSync) fetchBlocksBlockSync(ctx context.Context, p peer.ID, req *B
bs.RemovePeer(p) bs.RemovePeer(p)
return nil, xerrors.Errorf("failed to open stream to peer: %w", err) return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
} }
s.SetWriteDeadline(time.Now().Add(5 * time.Second)) _ = s.SetWriteDeadline(time.Now().Add(5 * time.Second))
if err := cborutil.WriteCborRPC(s, req); err != nil { if err := cborutil.WriteCborRPC(s, req); err != nil {
s.SetWriteDeadline(time.Time{}) _ = s.SetWriteDeadline(time.Time{})
bs.syncPeers.logFailure(p, time.Since(start)) bs.syncPeers.logFailure(p, time.Since(start))
return nil, err return nil, err
} }
s.SetWriteDeadline(time.Time{}) _ = s.SetWriteDeadline(time.Time{})
var res BlockSyncResponse var res BlockSyncResponse
r := incrt.New(s, 50<<10, 5*time.Second) r := incrt.New(s, 50<<10, 5*time.Second)
@ -356,6 +363,7 @@ func (bs *BlockSync) RemovePeer(p peer.ID) {
bs.syncPeers.removePeer(p) bs.syncPeers.removePeer(p)
} }
// getPeers returns a preference-sorted set of peers to query.
func (bs *BlockSync) getPeers() []peer.ID { func (bs *BlockSync) getPeers() []peer.ID {
return bs.syncPeers.prefSortedPeers() return bs.syncPeers.prefSortedPeers()
} }
@ -561,26 +569,30 @@ func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) {
bpt.lk.Lock() bpt.lk.Lock()
defer bpt.lk.Unlock() defer bpt.lk.Unlock()
if pi, ok := bpt.peers[p]; !ok { var pi *peerStats
var ok bool
if pi, ok = bpt.peers[p]; !ok {
log.Warnw("log success called on peer not in tracker", "peerid", p.String()) log.Warnw("log success called on peer not in tracker", "peerid", p.String())
return return
} else {
pi.successes++
logTime(pi, dur)
} }
pi.successes++
logTime(pi, dur)
} }
func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) { func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) {
bpt.lk.Lock() bpt.lk.Lock()
defer bpt.lk.Unlock() defer bpt.lk.Unlock()
if pi, ok := bpt.peers[p]; !ok {
var pi *peerStats
var ok bool
if pi, ok = bpt.peers[p]; !ok {
log.Warn("log failure called on peer not in tracker", "peerid", p.String()) log.Warn("log failure called on peer not in tracker", "peerid", p.String())
return return
} else { }
pi.failures++ pi.failures++
logTime(pi, dur) logTime(pi, dur)
}
} }
func (bpt *bsPeerTracker) removePeer(p peer.ID) { func (bpt *bsPeerTracker) removePeer(p peer.ID) {

View File

@ -19,7 +19,7 @@ import (
var log = logging.Logger("events") var log = logging.Logger("events")
// `curH`-`ts.Height` = `confidence` // HeightHandler `curH`-`ts.Height` = `confidence`
type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error
type RevertHandler func(ctx context.Context, ts *types.TipSet) error type RevertHandler func(ctx context.Context, ts *types.TipSet) error
@ -31,7 +31,7 @@ type heightHandler struct {
revert RevertHandler revert RevertHandler
} }
type eventApi interface { type eventAPI interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
@ -42,7 +42,7 @@ type eventApi interface {
} }
type Events struct { type Events struct {
api eventApi api eventAPI
tsc *tipSetCache tsc *tipSetCache
lk sync.Mutex lk sync.Mutex
@ -51,10 +51,10 @@ type Events struct {
readyOnce sync.Once readyOnce sync.Once
heightEvents heightEvents
calledEvents *hcEvents
} }
func NewEvents(ctx context.Context, api eventApi) *Events { func NewEvents(ctx context.Context, api eventAPI) *Events {
gcConfidence := 2 * build.ForkLengthThreshold gcConfidence := 2 * build.ForkLengthThreshold
tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight) tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight)
@ -67,25 +67,14 @@ func NewEvents(ctx context.Context, api eventApi) *Events {
heightEvents: heightEvents{ heightEvents: heightEvents{
tsc: tsc, tsc: tsc,
ctx: ctx, ctx: ctx,
gcConfidence: abi.ChainEpoch(gcConfidence), gcConfidence: gcConfidence,
heightTriggers: map[uint64]*heightHandler{}, heightTriggers: map[uint64]*heightHandler{},
htTriggerHeights: map[abi.ChainEpoch][]uint64{}, htTriggerHeights: map[abi.ChainEpoch][]uint64{},
htHeights: map[abi.ChainEpoch][]uint64{}, htHeights: map[abi.ChainEpoch][]uint64{},
}, },
calledEvents: calledEvents{ hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)),
cs: api,
tsc: tsc,
ctx: ctx,
gcConfidence: uint64(gcConfidence),
confQueue: map[triggerH]map[msgH][]*queuedEvent{},
revertQueue: map[msgH][]triggerH{},
triggers: map[triggerId]*callHandler{},
matchers: map[triggerId][]MatchFunc{},
timeouts: map[abi.ChainEpoch]map[triggerId]int{},
},
} }
e.ready.Add(1) e.ready.Add(1)
@ -143,7 +132,7 @@ func (e *Events) listenHeadChangesOnce(ctx context.Context) error {
} }
e.readyOnce.Do(func() { e.readyOnce.Do(func() {
e.at = cur[0].Val.Height() e.lastTs = cur[0].Val
e.ready.Done() e.ready.Done()
}) })
@ -186,5 +175,5 @@ func (e *Events) headChange(rev, app []*types.TipSet) error {
return err return err
} }
return e.headChangeCalled(rev, app) return e.processHeadChangeEvent(rev, app)
} }

View File

@ -13,8 +13,9 @@ import (
) )
const NoTimeout = math.MaxInt64 const NoTimeout = math.MaxInt64
const NoHeight = abi.ChainEpoch(-1)
type triggerId = uint64 type triggerID = uint64
// msgH is the block height at which a message was present / event has happened // msgH is the block height at which a message was present / event has happened
type msgH = abi.ChainEpoch type msgH = abi.ChainEpoch
@ -23,53 +24,60 @@ type msgH = abi.ChainEpoch
// message (msgH+confidence) // message (msgH+confidence)
type triggerH = abi.ChainEpoch type triggerH = abi.ChainEpoch
// `ts` is the tipset, in which the `msg` is included. type eventData interface{}
// EventHandler arguments:
// `prevTs` is the previous tipset, eg the "from" tipset for a state change.
// `ts` is the event tipset, eg the tipset in which the `msg` is included.
// `curH`-`ts.Height` = `confidence` // `curH`-`ts.Height` = `confidence`
type CalledHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
// CheckFunc is used for atomicity guarantees. If the condition the callbacks // CheckFunc is used for atomicity guarantees. If the condition the callbacks
// wait for has already happened in tipset `ts` // wait for has already happened in tipset `ts`
// //
// If `done` is true, timeout won't be triggered // If `done` is true, timeout won't be triggered
// If `more` is false, no messages will be sent to CalledHandler (RevertHandler // If `more` is false, no messages will be sent to EventHandler (RevertHandler
// may still be called) // may still be called)
type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error) type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error)
type MatchFunc func(msg *types.Message) (bool, error) // Keep track of information for an event handler
type handlerInfo struct {
type callHandler struct {
confidence int confidence int
timeout abi.ChainEpoch timeout abi.ChainEpoch
disabled bool // TODO: GC after gcConfidence reached disabled bool // TODO: GC after gcConfidence reached
handle CalledHandler handle EventHandler
revert RevertHandler revert RevertHandler
} }
// When a change occurs, a queuedEvent is created and put into a queue
// until the required confidence is reached
type queuedEvent struct { type queuedEvent struct {
trigger triggerId trigger triggerID
prevH abi.ChainEpoch
h abi.ChainEpoch h abi.ChainEpoch
msg *types.Message data eventData
called bool called bool
} }
type calledEvents struct { // Manages chain head change events, which may be forward (new tipset added to
cs eventApi // chain) or backward (chain branch discarded in favour of heavier branch)
type hcEvents struct {
cs eventAPI
tsc *tipSetCache tsc *tipSetCache
ctx context.Context ctx context.Context
gcConfidence uint64 gcConfidence uint64
at abi.ChainEpoch lastTs *types.TipSet
lk sync.Mutex lk sync.Mutex
ctr triggerId ctr triggerID
triggers map[triggerId]*callHandler triggers map[triggerID]*handlerInfo
matchers map[triggerId][]MatchFunc
// maps block heights to events // maps block heights to events
// [triggerH][msgH][event] // [triggerH][msgH][event]
@ -78,29 +86,81 @@ type calledEvents struct {
// [msgH][triggerH] // [msgH][triggerH]
revertQueue map[msgH][]triggerH revertQueue map[msgH][]triggerH
// [timeoutH+confidence][triggerId]{calls} // [timeoutH+confidence][triggerID]{calls}
timeouts map[abi.ChainEpoch]map[triggerId]int timeouts map[abi.ChainEpoch]map[triggerID]int
messageEvents
watcherEvents
} }
func (e *calledEvents) headChangeCalled(rev, app []*types.TipSet) error { func newHCEvents(ctx context.Context, cs eventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents {
e := hcEvents{
ctx: ctx,
cs: cs,
tsc: tsc,
gcConfidence: gcConfidence,
confQueue: map[triggerH]map[msgH][]*queuedEvent{},
revertQueue: map[msgH][]triggerH{},
triggers: map[triggerID]*handlerInfo{},
timeouts: map[abi.ChainEpoch]map[triggerID]int{},
}
e.messageEvents = newMessageEvents(ctx, &e, cs)
e.watcherEvents = newWatcherEvents(ctx, &e, cs)
return &e
}
// Called when there is a change to the head with tipsets to be
// reverted / applied
func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error {
e.lk.Lock()
defer e.lk.Unlock()
for _, ts := range rev { for _, ts := range rev {
e.handleReverts(ts) e.handleReverts(ts)
e.at = ts.Height() e.lastTs = ts
} }
for _, ts := range app { for _, ts := range app {
// called triggers // Check if the head change caused any state changes that we were
e.checkNewCalls(ts) // waiting for
for ; e.at <= ts.Height(); e.at++ { stateChanges := e.watcherEvents.checkStateChanges(e.lastTs, ts)
e.applyWithConfidence(ts, e.at)
// Queue up calls until there have been enough blocks to reach
// confidence on the state changes
for tid, data := range stateChanges {
e.queueForConfidence(tid, data, e.lastTs, ts)
}
// Check if the head change included any new message calls
newCalls, err := e.messageEvents.checkNewCalls(ts)
if err != nil {
return err
}
// Queue up calls until there have been enough blocks to reach
// confidence on the message calls
for tid, data := range newCalls {
e.queueForConfidence(tid, data, nil, ts)
}
for at := e.lastTs.Height(); at <= ts.Height(); at++ {
// Apply any queued events and timeouts that were targeted at the
// current chain height
e.applyWithConfidence(ts, at)
e.applyTimeouts(ts) e.applyTimeouts(ts)
} }
// Update the latest known tipset
e.lastTs = ts
} }
return nil return nil
} }
func (e *calledEvents) handleReverts(ts *types.TipSet) { func (e *hcEvents) handleReverts(ts *types.TipSet) {
reverts, ok := e.revertQueue[ts.Height()] reverts, ok := e.revertQueue[ts.Height()]
if !ok { if !ok {
return // nothing to do return // nothing to do
@ -116,7 +176,7 @@ func (e *calledEvents) handleReverts(ts *types.TipSet) {
trigger := e.triggers[event.trigger] trigger := e.triggers[event.trigger]
if err := trigger.revert(e.ctx, ts); err != nil { if err := trigger.revert(e.ctx, ts); err != nil {
log.Errorf("reverting chain trigger (call %s.%d() @H %d, called @ %d) failed: %s", event.msg.To, event.msg.Method, ts.Height(), triggerH, err) log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", ts.Height(), triggerH, err)
} }
} }
delete(e.confQueue[triggerH], ts.Height()) delete(e.confQueue[triggerH], ts.Height())
@ -124,42 +184,15 @@ func (e *calledEvents) handleReverts(ts *types.TipSet) {
delete(e.revertQueue, ts.Height()) delete(e.revertQueue, ts.Height())
} }
func (e *calledEvents) checkNewCalls(ts *types.TipSet) { // Queue up events until the chain has reached a height that reflects the
pts, err := e.cs.ChainGetTipSet(e.ctx, ts.Parents()) // we actually care about messages in the parent tipset here // desired confidence
if err != nil { func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) {
log.Errorf("getting parent tipset in checkNewCalls: %s", err) trigger := e.triggers[trigID]
return
}
e.messagesForTs(pts, func(msg *types.Message) { prevH := NoHeight
// TODO: provide receipts if prevTs != nil {
prevH = prevTs.Height()
for tid, matchFns := range e.matchers {
var matched bool
for _, matchFn := range matchFns {
ok, err := matchFn(msg)
if err != nil {
log.Errorf("event matcher failed: %s", err)
continue
} }
matched = ok
if matched {
break
}
}
if matched {
e.queueForConfidence(tid, msg, ts)
break
}
}
})
}
func (e *calledEvents) queueForConfidence(triggerId uint64, msg *types.Message, ts *types.TipSet) {
trigger := e.triggers[triggerId]
appliedH := ts.Height() appliedH := ts.Height()
triggerH := appliedH + abi.ChainEpoch(trigger.confidence) triggerH := appliedH + abi.ChainEpoch(trigger.confidence)
@ -171,18 +204,20 @@ func (e *calledEvents) queueForConfidence(triggerId uint64, msg *types.Message,
} }
byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{ byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{
trigger: triggerId, trigger: trigID,
prevH: prevH,
h: appliedH, h: appliedH,
msg: msg, data: data,
}) })
e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH) e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH)
} }
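As a worked example of the arithmetic above: an event observed in a tipset at height 10 and registered with confidence 3 is queued under confQueue[13][10]; it fires once height 13 is applied, and revertQueue[10] records 13 so the queued event can be discarded if the height-10 tipset is later reverted.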
func (e *calledEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) { // Apply any events that were waiting for this chain height for confidence
func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) {
byOrigH, ok := e.confQueue[height] byOrigH, ok := e.confQueue[height]
if !ok { if !ok {
return // no triggers at thin height return // no triggers at this height
} }
for origH, events := range byOrigH { for origH, events := range byOrigH {
@ -201,15 +236,20 @@ func (e *calledEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpo
continue continue
} }
rec, err := e.cs.StateGetReceipt(e.ctx, event.msg.Cid(), ts.Key()) // Previous tipset - this is relevant for example in a state change
// from one tipset to another
var prevTs *types.TipSet
if event.prevH != NoHeight {
prevTs, err = e.tsc.get(event.prevH)
if err != nil { if err != nil {
log.Error(err) log.Errorf("events: applyWithConfidence didn't find tipset for previous event; wanted %d; current %d", event.prevH, height)
return continue
}
} }
more, err := trigger.handle(event.msg, rec, triggerTs, height) more, err := trigger.handle(event.data, prevTs, triggerTs, height)
if err != nil { if err != nil {
log.Errorf("chain trigger (call %s.%d() @H %d, called @ %d) failed: %s", event.msg.To, event.msg.Method, origH, height, err) log.Errorf("chain trigger (@H %d, triggered @ %d) failed: %s", origH, height, err)
continue // don't revert failed calls continue // don't revert failed calls
} }
@ -225,17 +265,18 @@ func (e *calledEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpo
} }
} }
func (e *calledEvents) applyTimeouts(ts *types.TipSet) { // Apply any timeouts that expire at this height
func (e *hcEvents) applyTimeouts(ts *types.TipSet) {
triggers, ok := e.timeouts[ts.Height()] triggers, ok := e.timeouts[ts.Height()]
if !ok { if !ok {
return // nothing to do return // nothing to do
} }
for triggerId, calls := range triggers { for triggerID, calls := range triggers {
if calls > 0 { if calls > 0 {
continue // don't timeout if the method was called continue // don't timeout if the method was called
} }
trigger := e.triggers[triggerId] trigger := e.triggers[triggerID]
if trigger.disabled { if trigger.disabled {
continue continue
} }
@ -255,12 +296,225 @@ func (e *calledEvents) applyTimeouts(ts *types.TipSet) {
} }
} }
func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) { // Listen for an event
// - CheckFunc: immediately checks if the event already occurred
// - EventHandler: called when the event has occurred, after confidence tipsets
// - RevertHandler: called if the chain head changes causing the event to revert
// - confidence: wait this many tipsets before calling EventHandler
// - timeout: at this chain height, timeout on waiting for this event
func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) {
e.lk.Lock()
defer e.lk.Unlock()
// Check if the event has already occurred
ts := e.tsc.best()
done, more, err := check(ts)
if err != nil {
return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err)
}
if done {
timeout = NoTimeout
}
// Create a trigger for the event
id := e.ctr
e.ctr++
e.triggers[id] = &handlerInfo{
confidence: confidence,
timeout: timeout + abi.ChainEpoch(confidence),
disabled: !more,
handle: hnd,
revert: rev,
}
// If there's a timeout, set up a timeout check at that height
if timeout != NoTimeout {
if e.timeouts[timeout+abi.ChainEpoch(confidence)] == nil {
e.timeouts[timeout+abi.ChainEpoch(confidence)] = map[uint64]int{}
}
e.timeouts[timeout+abi.ChainEpoch(confidence)][id] = 0
}
return id, nil
}
// headChangeAPI is used to allow the composed event APIs to call back to hcEvents
// to listen for changes
type headChangeAPI interface {
onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error)
}
// watcherEvents watches for a state change
type watcherEvents struct {
ctx context.Context
cs eventAPI
hcAPI headChangeAPI
lk sync.RWMutex
matchers map[triggerID]StateMatchFunc
}
func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents {
return watcherEvents{
ctx: ctx,
cs: cs,
hcAPI: hcAPI,
matchers: make(map[triggerID]StateMatchFunc),
}
}
// Run each of the matchers against the previous and current state to see if
// there's a change
func (we *watcherEvents) checkStateChanges(oldState, newState *types.TipSet) map[triggerID]eventData {
we.lk.RLock()
defer we.lk.RUnlock()
res := make(map[triggerID]eventData)
for tid, matchFn := range we.matchers {
ok, data, err := matchFn(oldState, newState)
if err != nil {
log.Errorf("event diff fn failed: %s", err)
continue
}
if ok {
res[tid] = data
}
}
return res
}
// StateChange represents a change in state
type StateChange interface{}
// StateChangeHandler arguments:
// `oldTs` is the state "from" tipset
// `newTs` is the state "to" tipset
// `states` is the change in state
// `curH`-`ts.Height` = `confidence`
type StateChangeHandler func(oldTs, newTs *types.TipSet, states StateChange, curH abi.ChainEpoch) (more bool, err error)
type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error)
// StateChanged registers a callback which is triggered when a specified state
// change occurs or a timeout is reached.
//
// * `CheckFunc` callback is invoked immediately with a recent tipset, it
// returns two booleans - `done`, and `more`.
//
// * `done` should be true when some on-chain state change we are waiting
// for has happened. When `done` is set to true, timeout trigger is disabled.
//
// * `more` should be false when we don't want to receive new notifications
// through StateChangeHandler. Note that notifications may still be delivered to
// RevertHandler
//
// * `StateChangeHandler` is called when the specified state change was observed
// on-chain, and a confidence threshold was reached, or the specified `timeout`
// height was reached with no state change observed. When this callback is
// invoked on a timeout, `oldState` and `newState` are set to nil.
// This callback returns a boolean specifying whether further notifications
// should be sent, like `more` return param from `CheckFunc` above.
//
// * `RevertHandler` is called after apply handler, when we drop the tipset
// containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the event dropped may be re-applied
// in a different tipset in a small amount of time.
//
// * `StateMatchFunc` is called against each tipset state. If there is a match,
// the state change is queued up until the confidence interval has elapsed (and
// `StateChangeHandler` is called)
func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error {
hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
states, ok := data.(StateChange)
if data != nil && !ok {
panic("expected StateChange")
}
return scHnd(prevTs, ts, states, height)
}
id, err := we.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
if err != nil {
return err
}
we.lk.Lock()
defer we.lk.Unlock()
we.matchers[id] = mf
return nil
}
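A hedged usage sketch of registering a state-change watcher through the promoted Events API (the matcher below is a trivial placeholder; a real one would diff actor state between the two tipsets, and the logger is assumed to be this package's):

// Hedged sketch: watch for a placeholder "state change" with 5 tipsets of
// confidence and no timeout.
func watchHeightAdvance(events *Events) error {
	return events.StateChanged(
		func(ts *types.TipSet) (bool, bool, error) {
			return false, true, nil // nothing happened yet; keep listening
		},
		func(oldTs, newTs *types.TipSet, change StateChange, curH abi.ChainEpoch) (bool, error) {
			log.Infow("state changed", "from", oldTs.Height(), "to", newTs.Height(), "confidenceH", curH)
			return true, nil // keep receiving notifications
		},
		func(ctx context.Context, ts *types.TipSet) error {
			log.Warnw("state change reverted", "height", ts.Height())
			return nil
		},
		5, NoTimeout,
		func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
			// placeholder matcher: report a "change" whenever the height advances
			return newTs.Height() > oldTs.Height(), newTs.Height(), nil
		},
	)
}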
// messageEvents watches for message calls to actors
type messageEvents struct {
ctx context.Context
cs eventAPI
hcAPI headChangeAPI
lk sync.RWMutex
matchers map[triggerID][]MsgMatchFunc
}
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents {
return messageEvents{
ctx: ctx,
cs: cs,
hcAPI: hcAPI,
matchers: map[triggerID][]MsgMatchFunc{},
}
}
// Check if there are any new actor calls
func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) {
pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here
if err != nil {
log.Errorf("getting parent tipset in checkNewCalls: %s", err)
return nil, err
}
me.lk.RLock()
defer me.lk.RUnlock()
res := make(map[triggerID]eventData)
me.messagesForTs(pts, func(msg *types.Message) {
// TODO: provide receipts
for tid, matchFns := range me.matchers {
var matched bool
for _, matchFn := range matchFns {
ok, err := matchFn(msg)
if err != nil {
log.Errorf("event matcher failed: %s", err)
continue
}
matched = ok
if matched {
break
}
}
if matched {
res[tid] = msg
break
}
}
})
return res, nil
}
// Get the messages in a tipset
func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) {
seen := map[cid.Cid]struct{}{} seen := map[cid.Cid]struct{}{}
for _, tsb := range ts.Blocks() { for _, tsb := range ts.Blocks() {
msgs, err := e.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
if err != nil { if err != nil {
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
// this is quite bad, but probably better than missing all the other updates // this is quite bad, but probably better than missing all the other updates
@ -289,7 +543,14 @@ func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Messa
} }
} }
// Called registers a callbacks which are triggered when a specified method is // MsgHandler arguments:
// `ts` is the tipset, in which the `msg` is included.
// `curH`-`ts.Height` = `confidence`
type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error)
type MsgMatchFunc func(msg *types.Message) (bool, error)
// Called registers a callback which is triggered when a specified method is
// called on an actor, or a timeout is reached. // called on an actor, or a timeout is reached.
// //
// * `CheckFunc` callback is invoked immediately with a recent tipset, it // * `CheckFunc` callback is invoked immediately with a recent tipset, it
@ -299,10 +560,10 @@ func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Messa
// happened. When `done` is set to true, timeout trigger is disabled. // happened. When `done` is set to true, timeout trigger is disabled.
// //
// * `more` should be false when we don't want to receive new notifications // * `more` should be false when we don't want to receive new notifications
// through CalledHandler. Note that notifications may still be delivered to // through MsgHandler. Note that notifications may still be delivered to
// RevertHandler // RevertHandler
// //
// * `CalledHandler` is called when the specified event was observed on-chain, // * `MsgHandler` is called when the specified event was observed on-chain,
// and a confidence threshold was reached, or the specified `timeout` height // and a confidence threshold was reached, or the specified `timeout` height
// was reached with no events observed. When this callback is invoked on a // was reached with no events observed. When this callback is invoked on a
// timeout, `msg` is set to nil. This callback returns a boolean specifying // timeout, `msg` is set to nil. This callback returns a boolean specifying
@ -313,44 +574,38 @@ func (e *calledEvents) messagesForTs(ts *types.TipSet, consume func(*types.Messa
// containing the message. The tipset passed as the argument is the tipset // containing the message. The tipset passed as the argument is the tipset
// that is being dropped. Note that the message dropped may be re-applied // that is being dropped. Note that the message dropped may be re-applied
// in a different tipset in small amount of time. // in a different tipset in small amount of time.
func (e *calledEvents) Called(check CheckFunc, hnd CalledHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MatchFunc) error { //
e.lk.Lock() // * `MsgMatchFunc` is called against each message. If there is a match, the
defer e.lk.Unlock() // message is queued up until the confidence interval has elapsed (and
// `MsgHandler` is called)
func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error {
hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) {
msg, ok := data.(*types.Message)
if data != nil && !ok {
panic("expected msg")
}
ts := e.tsc.best() rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key())
done, more, err := check(ts)
if err != nil { if err != nil {
return xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err) return false, err
}
if done {
timeout = NoTimeout
} }
id := e.ctr return msgHnd(msg, rec, ts, height)
e.ctr++
e.triggers[id] = &callHandler{
confidence: confidence,
timeout: timeout + abi.ChainEpoch(confidence),
disabled: !more,
handle: hnd,
revert: rev,
} }
e.matchers[id] = append(e.matchers[id], mf) id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout)
if err != nil {
return err
}
if timeout != NoTimeout { me.lk.Lock()
if e.timeouts[timeout+abi.ChainEpoch(confidence)] == nil { defer me.lk.Unlock()
e.timeouts[timeout+abi.ChainEpoch(confidence)] = map[uint64]int{} me.matchers[id] = append(me.matchers[id], mf)
}
e.timeouts[timeout+abi.ChainEpoch(confidence)][id] = 0
}
return nil return nil
} }
func (e *calledEvents) CalledMsg(ctx context.Context, hnd CalledHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error { // Convenience function for checking and matching messages
return e.Called(e.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, e.MatchMsg(msg.VMMessage())) func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error {
return me.Called(me.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage()))
} }
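And a hedged sketch of the message side, watching for calls of a particular method on a particular actor (the address parameter and method number 2 are illustrative placeholders; assumes the go-address package is imported):

// Hedged sketch: get notified when method 2 is called on addr, with 3 tipsets
// of confidence and a timeout at epoch 1000.
func watchMethodCall(events *Events, addr address.Address) error {
	return events.Called(
		func(ts *types.TipSet) (bool, bool, error) {
			return false, true, nil // nothing observed yet; keep waiting
		},
		func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
			if msg == nil {
				log.Warnw("timed out waiting for call", "height", curH)
				return false, nil
			}
			log.Infow("actor called", "to", msg.To, "method", msg.Method, "exit", rec.ExitCode)
			return true, nil
		},
		func(ctx context.Context, ts *types.TipSet) error { return nil }, // revert handler
		3, abi.ChainEpoch(1000),
		func(msg *types.Message) (bool, error) {
			return msg.To == addr && msg.Method == 2, nil
		},
	)
}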

View File

@ -15,23 +15,26 @@ type heightEvents struct {
tsc *tipSetCache tsc *tipSetCache
gcConfidence abi.ChainEpoch gcConfidence abi.ChainEpoch
ctr triggerId ctr triggerID
heightTriggers map[triggerId]*heightHandler heightTriggers map[triggerID]*heightHandler
htTriggerHeights map[triggerH][]triggerId htTriggerHeights map[triggerH][]triggerID
htHeights map[msgH][]triggerId htHeights map[msgH][]triggerID
ctx context.Context ctx context.Context
} }
func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange") ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
defer span.End() defer span.End()
span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height()))) span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev))))
span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) span.AddAttributes(trace.Int64Attribute("applies", int64(len(app))))
e.lk.Lock()
defer e.lk.Unlock()
for _, ts := range rev { for _, ts := range rev {
// TODO: log error if h below gcconfidence // TODO: log error if h below gcconfidence
// revert height-based triggers // revert height-based triggers
@ -40,7 +43,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
for _, tid := range e.htHeights[h] { for _, tid := range e.htHeights[h] {
ctx, span := trace.StartSpan(ctx, "events.HeightRevert") ctx, span := trace.StartSpan(ctx, "events.HeightRevert")
err := e.heightTriggers[tid].revert(ctx, ts) rev := e.heightTriggers[tid].revert
e.lk.Unlock()
err := rev(ctx, ts)
e.lk.Lock()
e.heightTriggers[tid].called = false e.heightTriggers[tid].called = false
span.End() span.End()
@ -98,8 +104,10 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "events.HeightApply") ctx, span := trace.StartSpan(ctx, "events.HeightApply")
span.AddAttributes(trace.BoolAttribute("immediate", false)) span.AddAttributes(trace.BoolAttribute("immediate", false))
handle := hnd.handle
err = hnd.handle(ctx, incTs, h) e.lk.Unlock()
err = handle(ctx, incTs, h)
e.lk.Lock()
span.End() span.End()
if err != nil { if err != nil {

View File

@ -211,15 +211,14 @@ func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { /
fcs.sub(revs, apps) fcs.sub(revs, apps)
fcs.sync.Lock() fcs.sync.Lock()
fcs.sync.Unlock() fcs.sync.Unlock() //nolint:staticcheck
} }
func (fcs *fakeCS) notifDone() { func (fcs *fakeCS) notifDone() {
fcs.sync.Unlock() fcs.sync.Unlock()
} }
var _ eventApi = &fakeCS{} var _ eventAPI = &fakeCS{}
func TestAt(t *testing.T) { func TestAt(t *testing.T) {
fcs := &fakeCS{ fcs := &fakeCS{
@ -1005,8 +1004,6 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
return false, true, nil return false, true, nil
}, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) {
require.Equal(t, false, applied) require.Equal(t, false, applied)
fmt.Println(msg == nil)
fmt.Println(curH)
applied = true applied = true
return more, nil return more, nil
}, func(_ context.Context, ts *types.TipSet) error { }, func(_ context.Context, ts *types.TipSet) error {
@ -1068,3 +1065,250 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
require.Equal(t, true, applied) require.Equal(t, true, applied)
require.Equal(t, false, reverted) require.Equal(t, false, reverted)
} }
type testStateChange struct {
from string
to string
}
func TestStateChanged(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
events := NewEvents(context.Background(), fcs)
more := true
var applied, reverted bool
var appliedData StateChange
var appliedOldTs *types.TipSet
var appliedNewTs *types.TipSet
var appliedH abi.ChainEpoch
var matchData StateChange
confidence := 3
timeout := abi.ChainEpoch(20)
err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) {
return false, true, nil
}, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) {
require.Equal(t, false, applied)
applied = true
appliedData = data
appliedOldTs = oldTs
appliedNewTs = newTs
appliedH = curH
return more, nil
}, func(_ context.Context, ts *types.TipSet) error {
reverted = true
return nil
}, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
if matchData == nil {
return false, matchData, nil
}
d := matchData
matchData = nil
return true, d, nil
})
require.NoError(t, err)
// create a few blocks to make sure nothing gets randomly called
fcs.advance(0, 4, nil) // H=5
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// create state change (but below confidence threshold)
matchData = testStateChange{from: "a", to: "b"}
fcs.advance(0, 3, nil)
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// create an additional block so we are above the confidence threshold
fcs.advance(0, 2, nil) // H=10 (confidence=3, apply)
require.Equal(t, true, applied)
require.Equal(t, false, reverted)
applied = false
// dip below confidence (should not apply again)
fcs.advance(2, 2, nil) // H=10 (confidence=3, apply)
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// Change happens from 5 -> 6
require.Equal(t, abi.ChainEpoch(5), appliedOldTs.Height())
require.Equal(t, abi.ChainEpoch(6), appliedNewTs.Height())
// Actually applied (with confidence) at 9
require.Equal(t, abi.ChainEpoch(9), appliedH)
// Make sure the state change was correctly passed through
rcvd := appliedData.(testStateChange)
require.Equal(t, "a", rcvd.from)
require.Equal(t, "b", rcvd.to)
}
func TestStateChangedRevert(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
events := NewEvents(context.Background(), fcs)
more := true
var applied, reverted bool
var matchData StateChange
confidence := 1
timeout := abi.ChainEpoch(20)
err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) {
return false, true, nil
}, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) {
require.Equal(t, false, applied)
applied = true
return more, nil
}, func(_ context.Context, ts *types.TipSet) error {
reverted = true
return nil
}, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
if matchData == nil {
return false, matchData, nil
}
d := matchData
matchData = nil
return true, d, nil
})
require.NoError(t, err)
fcs.advance(0, 2, nil) // H=3
// Make a state change from TS at height 3 to TS at height 4
matchData = testStateChange{from: "a", to: "b"}
fcs.advance(0, 1, nil) // H=4
// Haven't yet reached confidence
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// Advance to reach confidence level
fcs.advance(0, 1, nil) // H=5
// Should now have called the handler
require.Equal(t, true, applied)
require.Equal(t, false, reverted)
applied = false
// Advance 3 more TS
fcs.advance(0, 3, nil) // H=8
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// Regress but not so far as to cause a revert
fcs.advance(3, 1, nil) // H=6
require.Equal(t, false, applied)
require.Equal(t, false, reverted)
// Regress back to state where change happened
fcs.advance(3, 1, nil) // H=4
// Expect revert to have happened
require.Equal(t, false, applied)
require.Equal(t, true, reverted)
}
func TestStateChangedTimeout(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
events := NewEvents(context.Background(), fcs)
called := false
err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) {
return false, true, nil
}, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) {
called = true
require.Nil(t, data)
require.Equal(t, abi.ChainEpoch(20), newTs.Height())
require.Equal(t, abi.ChainEpoch(23), curH)
return false, nil
}, func(_ context.Context, ts *types.TipSet) error {
t.Fatal("revert on timeout")
return nil
}, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
return false, nil, nil
})
require.NoError(t, err)
fcs.advance(0, 21, nil)
require.False(t, called)
fcs.advance(0, 5, nil)
require.True(t, called)
called = false
// with check func reporting done
fcs = &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
events = NewEvents(context.Background(), fcs)
err = events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) {
return true, true, nil
}, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) {
called = true
require.Nil(t, data)
require.Equal(t, abi.ChainEpoch(20), newTs.Height())
require.Equal(t, abi.ChainEpoch(23), curH)
return false, nil
}, func(_ context.Context, ts *types.TipSet) error {
t.Fatal("revert on timeout")
return nil
}, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
return false, nil, nil
})
require.NoError(t, err)
fcs.advance(0, 21, nil)
require.False(t, called)
fcs.advance(0, 5, nil)
require.False(t, called)
}
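A minimal non-test sketch of the new StateChanged trigger, written as if inside the events package like the tests above (assumptions: `ev` is an *Events instance; the confidence, timeout and match logic are illustrative only):

	err := ev.StateChanged(
		func(ts *types.TipSet) (done bool, more bool, err error) {
			return false, true, nil // not done yet, keep matching
		},
		func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) {
			// called once the matched change has `confidence` epochs built on top of it
			return true, nil // true: keep the trigger alive
		},
		func(ctx context.Context, ts *types.TipSet) error {
			return nil // the change was reorged away after the handler fired
		},
		3,                  // confidence
		abi.ChainEpoch(50), // timeout
		func(oldTs, newTs *types.TipSet) (bool, StateChange, error) {
			// decide whether a relevant change happened between the two tipsets,
			// e.g. by running a StatePredicates DiffFunc, and return its user data
			return false, nil, nil
		})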

View File

@ -0,0 +1,137 @@
package state
import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
)
// UserData is the data returned from the DiffFunc
type UserData interface{}
// ChainAPI abstracts out calls made by this class to external APIs
type ChainAPI interface {
apibstore.ChainIO
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
}
// StatePredicates has common predicates for responding to state changes
type StatePredicates struct {
api ChainAPI
cst *cbor.BasicIpldStore
}
func NewStatePredicates(api ChainAPI) *StatePredicates {
return &StatePredicates{
api: api,
cst: cbor.NewCborStore(apibstore.NewAPIBlockstore(api)),
}
}
// DiffFunc checks if there's a change from oldState to newState, and returns
// - changed: was there a change
// - user: user-defined data representing the state change
// - err
type DiffFunc func(ctx context.Context, oldState, newState *types.TipSet) (changed bool, user UserData, err error)
type DiffStateFunc func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error)
// OnActorStateChanged calls diffStateFunc when the state changes for the given actor
func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFunc DiffStateFunc) DiffFunc {
return func(ctx context.Context, oldState, newState *types.TipSet) (changed bool, user UserData, err error) {
oldActor, err := sp.api.StateGetActor(ctx, addr, oldState.Key())
if err != nil {
return false, nil, err
}
newActor, err := sp.api.StateGetActor(ctx, addr, newState.Key())
if err != nil {
return false, nil, err
}
if oldActor.Head.Equals(newActor.Head) {
return false, nil, nil
}
return diffStateFunc(ctx, oldActor.Head, newActor.Head)
}
}
type DiffStorageMarketStateFunc func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error)
// OnStorageMarketActorChanged calls diffStorageMarketState when the state changes for the market actor
func (sp *StatePredicates) OnStorageMarketActorChanged(diffStorageMarketState DiffStorageMarketStateFunc) DiffFunc {
return sp.OnActorStateChanged(builtin.StorageMarketActorAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
var oldState market.State
if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
return false, nil, err
}
var newState market.State
if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
return false, nil, err
}
return diffStorageMarketState(ctx, &oldState, &newState)
})
}
type DiffDealStatesFunc func(ctx context.Context, oldDealStateRoot *amt.Root, newDealStateRoot *amt.Root) (changed bool, user UserData, err error)
// OnDealStateChanged calls diffDealStates when the market state changes
func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffDealStatesFunc) DiffStorageMarketStateFunc {
return func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) {
if oldState.States.Equals(newState.States) {
return false, nil, nil
}
oldRoot, err := amt.LoadAMT(ctx, sp.cst, oldState.States)
if err != nil {
return false, nil, err
}
newRoot, err := amt.LoadAMT(ctx, sp.cst, newState.States)
if err != nil {
return false, nil, err
}
return diffDealStates(ctx, oldRoot, newRoot)
}
}
// ChangedDeals is a set of changes to deal state
type ChangedDeals map[abi.DealID]DealStateChange
// DealStateChange is a change in deal state from -> to
type DealStateChange struct {
From market.DealState
To market.DealState
}
// DealStateChangedForIDs detects changes in the deal state AMT for the given deal IDs
func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDealStatesFunc {
return func(ctx context.Context, oldDealStateRoot *amt.Root, newDealStateRoot *amt.Root) (changed bool, user UserData, err error) {
changedDeals := make(ChangedDeals)
for _, dealID := range dealIds {
var oldDeal, newDeal market.DealState
err := oldDealStateRoot.Get(ctx, uint64(dealID), &oldDeal)
if err != nil {
return false, nil, err
}
err = newDealStateRoot.Get(ctx, uint64(dealID), &newDeal)
if err != nil {
return false, nil, err
}
if oldDeal != newDeal {
changedDeals[dealID] = DealStateChange{oldDeal, newDeal}
}
}
if len(changedDeals) > 0 {
return true, changedDeals, nil
}
return false, nil, nil
}
}
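The new predicates compose; a minimal sketch of watching two deals for state changes (assumptions: `api` implements ChainAPI, `ctx`, `oldTs` and `newTs` are supplied by the caller, fmt is imported, and the deal IDs are illustrative):

	preds := NewStatePredicates(api)
	dealDiff := preds.OnStorageMarketActorChanged(
		preds.OnDealStateChanged(
			preds.DealStateChangedForIDs([]abi.DealID{1, 2})))

	changed, data, err := dealDiff(ctx, oldTs, newTs)
	if err != nil {
		return err
	}
	if changed {
		for id, ch := range data.(ChangedDeals) {
			fmt.Printf("deal %d: LastUpdatedEpoch %d -> %d\n",
				id, ch.From.LastUpdatedEpoch, ch.To.LastUpdatedEpoch)
		}
	}

The ChangedDeals map returned as UserData is the kind of payload a StateChanged match function would hand to its handler as the StateChange value.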

View File

@ -0,0 +1,201 @@
package state
import (
"context"
"testing"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-hamt-ipld"
"github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/specs-actors/actors/builtin/market"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
bstore "github.com/ipfs/go-ipfs-blockstore"
cbornode "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi"
)
var dummyCid cid.Cid
func init() {
dummyCid, _ = cid.Parse("bafkqaaa")
}
type mockAPI struct {
ts map[types.TipSetKey]*types.Actor
bs bstore.Blockstore
}
func newMockAPI(bs bstore.Blockstore) *mockAPI {
return &mockAPI{
bs: bs,
ts: make(map[types.TipSetKey]*types.Actor),
}
}
func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) {
return m.bs.Has(c)
}
func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) {
blk, err := m.bs.Get(c)
if err != nil {
return nil, xerrors.Errorf("blockstore get: %w", err)
}
return blk.RawData(), nil
}
func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
return m.ts[tsk], nil
}
func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) {
m.ts[tsk] = act
}
func TestPredicates(t *testing.T) {
ctx := context.Background()
bs := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore()))
store := cbornode.NewCborStore(bs)
oldDeals := map[abi.DealID]*market.DealState{
abi.DealID(1): {
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
SlashEpoch: 0,
},
abi.DealID(2): {
SectorStartEpoch: 4,
LastUpdatedEpoch: 5,
SlashEpoch: 0,
},
}
oldStateC := createMarketState(ctx, t, store, oldDeals)
newDeals := map[abi.DealID]*market.DealState{
abi.DealID(1): {
SectorStartEpoch: 1,
LastUpdatedEpoch: 3,
SlashEpoch: 0,
},
abi.DealID(2): {
SectorStartEpoch: 4,
LastUpdatedEpoch: 6,
SlashEpoch: 6,
},
}
newStateC := createMarketState(ctx, t, store, newDeals)
miner, err := address.NewFromString("t00")
require.NoError(t, err)
oldState, err := mockTipset(miner, 1)
require.NoError(t, err)
newState, err := mockTipset(miner, 2)
require.NoError(t, err)
api := newMockAPI(bs)
api.setActor(oldState.Key(), &types.Actor{Head: oldStateC})
api.setActor(newState.Key(), &types.Actor{Head: newStateC})
preds := NewStatePredicates(api)
dealIds := []abi.DealID{abi.DealID(1), abi.DealID(2)}
diffFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIds)))
// Diff a state against itself: expect no change
changed, _, err := diffFn(ctx, oldState, oldState)
require.NoError(t, err)
require.False(t, changed)
// Diff old state against new state
changed, val, err := diffFn(ctx, oldState, newState)
require.NoError(t, err)
require.True(t, changed)
changedDeals, ok := val.(ChangedDeals)
require.True(t, ok)
require.Len(t, changedDeals, 2)
require.Contains(t, changedDeals, abi.DealID(1))
require.Contains(t, changedDeals, abi.DealID(2))
deal1 := changedDeals[abi.DealID(1)]
if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 {
t.Fatal("Unexpected change to LastUpdatedEpoch")
}
deal2 := changedDeals[abi.DealID(2)]
if deal2.From.SlashEpoch != 0 || deal2.To.SlashEpoch != 6 {
t.Fatal("Unexpected change to SlashEpoch")
}
// Test that OnActorStateChanged does not call the callback if the state has not changed
mockAddr, err := address.NewFromString("t01")
require.NoError(t, err)
actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, cid.Cid, cid.Cid) (bool, UserData, error) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
changed, _, err = actorDiffFn(ctx, oldState, oldState)
require.NoError(t, err)
require.False(t, changed)
// Test that OnDealStateChanged does not call the callback if the state has not changed
diffDealStateFn := preds.OnDealStateChanged(func(context.Context, *amt.Root, *amt.Root) (bool, UserData, error) {
t.Fatal("No state change so this should not be called")
return false, nil, nil
})
marketState := createEmptyMarketState(t, store)
changed, _, err = diffDealStateFn(ctx, marketState, marketState)
require.NoError(t, err)
require.False(t, changed)
}
func mockTipset(miner address.Address, timestamp uint64) (*types.TipSet, error) {
return types.NewTipSet([]*types.BlockHeader{{
Miner: miner,
Height: 5,
ParentStateRoot: dummyCid,
Messages: dummyCid,
ParentMessageReceipts: dummyCid,
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS},
BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS},
Timestamp: timestamp,
}})
}
func createMarketState(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals map[abi.DealID]*market.DealState) cid.Cid {
rootCid := createAMT(ctx, t, store, deals)
state := createEmptyMarketState(t, store)
state.States = rootCid
stateC, err := store.Put(ctx, state)
require.NoError(t, err)
return stateC
}
func createEmptyMarketState(t *testing.T, store *cbornode.BasicIpldStore) *market.State {
emptyArrayCid, err := amt.NewAMT(store).Flush(context.TODO())
require.NoError(t, err)
emptyMap, err := store.Put(context.TODO(), hamt.NewNode(store, hamt.UseTreeBitWidth(5)))
require.NoError(t, err)
return market.ConstructState(emptyArrayCid, emptyMap, emptyMap)
}
func createAMT(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals map[abi.DealID]*market.DealState) cid.Cid {
root := amt.NewAMT(store)
for dealID, dealState := range deals {
err := root.Set(ctx, uint64(dealID), dealState)
require.NoError(t, err)
}
rootCid, err := root.Flush(ctx)
require.NoError(t, err)
return rootCid
}

View File

@ -8,11 +8,11 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
func (e *calledEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd CalledHandler) CheckFunc { func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd MsgHandler) CheckFunc {
msg := smsg.VMMessage() msg := smsg.VMMessage()
return func(ts *types.TipSet) (done bool, more bool, err error) { return func(ts *types.TipSet) (done bool, more bool, err error) {
fa, err := e.cs.StateGetActor(ctx, msg.From, ts.Key()) fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key())
if err != nil { if err != nil {
return false, true, err return false, true, err
} }
@ -22,7 +22,7 @@ func (e *calledEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd Ca
return false, true, nil return false, true, nil
} }
rec, err := e.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key()) rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key())
if err != nil { if err != nil {
return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err) return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err)
} }
@ -33,10 +33,10 @@ func (e *calledEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd Ca
} }
} }
func (e *calledEvents) MatchMsg(inmsg *types.Message) MatchFunc { func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc {
return func(msg *types.Message) (bool, error) { return func(msg *types.Message) (bool, error) {
if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) { if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) {
return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %s", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce)
} }
return inmsg.Equals(msg), nil return inmsg.Equals(msg), nil

View File

@ -45,6 +45,10 @@ var log = logging.Logger("gen")
const msgsPerBlock = 20 const msgsPerBlock = 20
var ValidWpostForTesting = []abi.PoStProof{{
ProofBytes: []byte("valid proof"),
}}
type ChainGen struct { type ChainGen struct {
msgsPerBlock int msgsPerBlock int
@ -89,8 +93,8 @@ func (m mybs) Get(c cid.Cid) (block.Block, error) {
} }
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
saminer.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
mr := repo.NewMemory(nil) mr := repo.NewMemory(nil)
@ -104,7 +108,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) return nil, xerrors.Errorf("failed to get metadata datastore: %w", err)
} }
bds, err := lr.Datastore("/blocks") bds, err := lr.Datastore("/chain")
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to get blocks datastore: %w", err) return nil, xerrors.Errorf("failed to get blocks datastore: %w", err)
} }
@ -141,7 +145,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, err return nil, err
} }
genm1, k1, err := seed.PreSeal(maddr1, abi.RegisteredProof_StackedDRG2KiBPoSt, 0, numSectors, m1temp, []byte("some randomness"), nil) genm1, k1, err := seed.PreSeal(maddr1, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m1temp, []byte("some randomness"), nil, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -153,7 +157,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return nil, err return nil, err
} }
genm2, k2, err := seed.PreSeal(maddr2, abi.RegisteredProof_StackedDRG2KiBPoSt, 0, numSectors, m2temp, []byte("some randomness"), nil) genm2, k2, err := seed.PreSeal(maddr2, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m2temp, []byte("some randomness"), nil, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -192,7 +196,7 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
*genm2, *genm2,
}, },
NetworkName: "", NetworkName: "",
Timestamp: uint64(time.Now().Add(-500 * build.BlockDelay * time.Second).Unix()), Timestamp: uint64(time.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()),
} }
genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl) genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl)
@ -219,6 +223,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
miners := []address.Address{maddr1, maddr2} miners := []address.Address{maddr1, maddr2}
beac := beacon.NewMockBeacon(time.Second) beac := beacon.NewMockBeacon(time.Second)
//beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs)
//if err != nil {
//return nil, xerrors.Errorf("creating drand beacon: %w", err)
//}
gen := &ChainGen{ gen := &ChainGen{
bs: bs, bs: bs,
@ -406,7 +414,7 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
if cg.Timestamper != nil { if cg.Timestamper != nil {
ts = cg.Timestamper(parents, height-parents.Height()) ts = cg.Timestamper(parents, height-parents.Height())
} else { } else {
ts = parents.MinTimestamp() + uint64((height-parents.Height())*build.BlockDelay) ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs
} }
fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{ fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{
@ -427,7 +435,7 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
return fblk, err return fblk, err
} }
// This function is awkward. It's used to deal with messages made when // ResyncBankerNonce is used for dealing with messages made when
// simulating forks // simulating forks
func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error { func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error {
act, err := cg.sm.GetActor(cg.banker, ts) act, err := cg.sm.GetActor(cg.banker, ts)
@ -529,16 +537,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
} }
func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) { func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
return []abi.PoStProof{{ return ValidWpostForTesting, nil
ProofBytes: []byte("valid proof"),
}}, nil
}
type ProofInput struct {
sectors []abi.SectorInfo
hvrf []byte
challengedSectors []uint64
vrfout []byte
} }
func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
@ -614,6 +613,6 @@ func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPo
panic("not supported") panic("not supported")
} }
func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proof abi.RegisteredProof, id abi.ActorID, randomness abi.PoStRandomness, u uint64) ([]uint64, error) { func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proof abi.RegisteredPoStProof, id abi.ActorID, randomness abi.PoStRandomness, u uint64) ([]uint64, error) {
panic("not supported") panic("not supported")
} }

View File

@ -14,8 +14,8 @@ import (
) )
func init() { func init() {
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
power.ConsensusMinerMinPower = big.NewInt(2048) power.ConsensusMinerMinPower = big.NewInt(2048)
verifreg.MinVerifiedDealSize = big.NewInt(256) verifreg.MinVerifiedDealSize = big.NewInt(256)

View File

@ -58,6 +58,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
for i, m := range miners { for i, m := range miners {
// Create miner through power actor // Create miner through power actor
i := i
m := m
spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize) spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
if err != nil { if err != nil {
@ -69,7 +71,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
constructorParams := &power.CreateMinerParams{ constructorParams := &power.CreateMinerParams{
Owner: m.Worker, Owner: m.Worker,
Worker: m.Worker, Worker: m.Worker,
Peer: m.PeerId, Peer: []byte(m.PeerId),
SealProofType: spt, SealProofType: spt,
} }
@ -154,6 +156,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// Commit sectors // Commit sectors
for pi, preseal := range m.Sectors { for pi, preseal := range m.Sectors {
preseal := preseal
// TODO: Maybe check seal (Can just be snark inputs, doesn't go into the genesis file) // TODO: Maybe check seal (Can just be snark inputs, doesn't go into the genesis file)
// check deals, get dealWeight // check deals, get dealWeight
@ -201,7 +204,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
{ {
newSectorInfo := &miner.SectorOnChainInfo{ newSectorInfo := &miner.SectorOnChainInfo{
Info: miner.SectorPreCommitInfo{ Info: miner.SectorPreCommitInfo{
RegisteredProof: preseal.ProofType, SealProof: preseal.ProofType,
SectorNumber: preseal.SectorID, SectorNumber: preseal.SectorID,
SealedCID: preseal.CommR, SealedCID: preseal.CommR,
SealRandEpoch: 0, SealRandEpoch: 0,
@ -239,6 +242,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
st.TotalQualityAdjPower = big.Sub(st.TotalQualityAdjPower, big.NewInt(1)) st.TotalQualityAdjPower = big.Sub(st.TotalQualityAdjPower, big.NewInt(1))
return nil return nil
}) })
if err != nil {
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
}
c, err := vm.Flush(ctx) c, err := vm.Flush(ctx)
if err != nil { if err != nil {

View File

@ -150,6 +150,17 @@ func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
} }
aggSig := bls.Aggregate(blsSigs) aggSig := bls.Aggregate(blsSigs)
if aggSig == nil {
if len(sigs) > 0 {
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
}
return &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: new(bls.Signature)[:],
}, nil
}
return &crypto.Signature{ return &crypto.Signature{
Type: crypto.SigTypeBLS, Type: crypto.SigTypeBLS,
Data: aggSig[:], Data: aggSig[:],

View File

@ -187,7 +187,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
mp := &MessagePool{ mp := &MessagePool{
closer: make(chan struct{}), closer: make(chan struct{}),
repubTk: time.NewTicker(build.BlockDelay * 10 * time.Second), repubTk: time.NewTicker(time.Duration(build.BlockDelaySecs) * 10 * time.Second),
localAddrs: make(map[address.Address]struct{}), localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet), pending: make(map[address.Address]*msgSet),
minGasPrice: types.NewInt(0), minGasPrice: types.NewInt(0),

View File

@ -16,7 +16,7 @@ import (
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
) )
type testMpoolApi struct { type testMpoolAPI struct {
cb func(rev, app []*types.TipSet) error cb func(rev, app []*types.TipSet) error
bmsgs map[cid.Cid][]*types.SignedMessage bmsgs map[cid.Cid][]*types.SignedMessage
@ -25,68 +25,68 @@ type testMpoolApi struct {
tipsets []*types.TipSet tipsets []*types.TipSet
} }
func newTestMpoolApi() *testMpoolApi { func newTestMpoolAPI() *testMpoolAPI {
return &testMpoolApi{ return &testMpoolAPI{
bmsgs: make(map[cid.Cid][]*types.SignedMessage), bmsgs: make(map[cid.Cid][]*types.SignedMessage),
statenonce: make(map[address.Address]uint64), statenonce: make(map[address.Address]uint64),
} }
} }
func (tma *testMpoolApi) applyBlock(t *testing.T, b *types.BlockHeader) { func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
t.Helper() t.Helper()
if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil { if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
func (tma *testMpoolApi) revertBlock(t *testing.T, b *types.BlockHeader) { func (tma *testMpoolAPI) revertBlock(t *testing.T, b *types.BlockHeader) {
t.Helper() t.Helper()
if err := tma.cb([]*types.TipSet{mock.TipSet(b)}, nil); err != nil { if err := tma.cb([]*types.TipSet{mock.TipSet(b)}, nil); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
func (tma *testMpoolApi) setStateNonce(addr address.Address, v uint64) { func (tma *testMpoolAPI) setStateNonce(addr address.Address, v uint64) {
tma.statenonce[addr] = v tma.statenonce[addr] = v
} }
func (tma *testMpoolApi) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) {
tma.bmsgs[h.Cid()] = msgs tma.bmsgs[h.Cid()] = msgs
tma.tipsets = append(tma.tipsets, mock.TipSet(h)) tma.tipsets = append(tma.tipsets, mock.TipSet(h))
} }
func (tma *testMpoolApi) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
tma.cb = cb tma.cb = cb
return nil return nil
} }
func (tma *testMpoolApi) PutMessage(m types.ChainMsg) (cid.Cid, error) { func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return cid.Undef, nil return cid.Undef, nil
} }
func (tma *testMpoolApi) PubSubPublish(string, []byte) error { func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
return nil return nil
} }
func (tma *testMpoolApi) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { func (tma *testMpoolAPI) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
return &types.Actor{ return &types.Actor{
Nonce: tma.statenonce[addr], Nonce: tma.statenonce[addr],
Balance: types.NewInt(90000000), Balance: types.NewInt(90000000),
}, nil }, nil
} }
func (tma *testMpoolApi) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 {
return address.Undef, fmt.Errorf("given address was not a key addr") return address.Undef, fmt.Errorf("given address was not a key addr")
} }
return addr, nil return addr, nil
} }
func (tma *testMpoolApi) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
return nil, tma.bmsgs[h.Cid()], nil return nil, tma.bmsgs[h.Cid()], nil
} }
func (tma *testMpoolApi) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
if len(ts.Blocks()) != 1 { if len(ts.Blocks()) != 1 {
panic("cant deal with multiblock tipsets in this test") panic("cant deal with multiblock tipsets in this test")
} }
@ -108,7 +108,7 @@ func (tma *testMpoolApi) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg,
return out, nil return out, nil
} }
func (tma *testMpoolApi) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
for _, ts := range tma.tipsets { for _, ts := range tma.tipsets {
if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { if types.CidArrsEqual(tsk.Cids(), ts.Cids()) {
return ts, nil return ts, nil
@ -138,7 +138,7 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
} }
func TestMessagePool(t *testing.T) { func TestMessagePool(t *testing.T) {
tma := newTestMpoolApi() tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore()) w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil { if err != nil {
@ -179,7 +179,7 @@ func TestMessagePool(t *testing.T) {
} }
func TestRevertMessages(t *testing.T) { func TestRevertMessages(t *testing.T) {
tma := newTestMpoolApi() tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore()) w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil { if err != nil {

View File

@ -21,7 +21,7 @@ import (
var log = logging.Logger("statetree") var log = logging.Logger("statetree")
// Stores actors state by their ID. // StateTree stores actors state by their ID.
type StateTree struct { type StateTree struct {
root *hamt.Node root *hamt.Node
Store cbor.IpldStore Store cbor.IpldStore
@ -149,7 +149,7 @@ func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
return nil return nil
} }
// `LookupID` gets the ID address of this actor's `addr` stored in the `InitActor`. // LookupID gets the ID address of this actor's `addr` stored in the `InitActor`.
func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { func (st *StateTree) LookupID(addr address.Address) (address.Address, error) {
if addr.Protocol() == address.ID { if addr.Protocol() == address.ID {
return addr, nil return addr, nil

View File

@ -255,12 +255,15 @@ func TestStateTreeConsistency(t *testing.T) {
} }
for i, a := range addrs { for i, a := range addrs {
st.SetActor(a, &types.Actor{ err := st.SetActor(a, &types.Actor{
Code: randomCid, Code: randomCid,
Head: randomCid, Head: randomCid,
Balance: types.NewInt(uint64(10000 + i)), Balance: types.NewInt(uint64(10000 + i)),
Nonce: uint64(1000 - i), Nonce: uint64(1000 - i),
}) })
if err != nil {
t.Fatalf("while setting actor: %+v", err)
}
} }
root, err := st.Flush(context.TODO()) root, err := st.Flush(context.TODO())

View File

@ -64,7 +64,7 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate
return &api.InvocResult{ return &api.InvocResult{
Msg: msg, Msg: msg,
MsgRct: &ret.MessageReceipt, MsgRct: &ret.MessageReceipt,
InternalExecutions: ret.InternalExecutions, ExecutionTrace: ret.ExecutionTrace,
Error: errs, Error: errs,
Duration: ret.Duration, Duration: ret.Duration,
}, nil }, nil

View File

@ -37,8 +37,8 @@ import (
) )
func init() { func init() {
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
power.ConsensusMinerMinPower = big.NewInt(2048) power.ConsensusMinerMinPower = big.NewInt(2048)
verifreg.MinVerifiedDealSize = big.NewInt(256) verifreg.MinVerifiedDealSize = big.NewInt(256)

View File

@ -123,7 +123,7 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
ir := &api.InvocResult{ ir := &api.InvocResult{
Msg: msg, Msg: msg,
MsgRct: &ret.MessageReceipt, MsgRct: &ret.MessageReceipt,
InternalExecutions: ret.InternalExecutions, ExecutionTrace: ret.ExecutionTrace,
Duration: ret.Duration, Duration: ret.Duration,
} }
if ret.ActorErr != nil { if ret.ActorErr != nil {
@ -187,7 +187,6 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, pstate cid.Cid, bms []B
Miner: b.Miner, Miner: b.Miner,
Penalty: penalty, Penalty: penalty,
GasReward: gasReward, GasReward: gasReward,
TicketCount: 1, // TODO: no longer need ticket count here.
}) })
if err != nil { if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err) return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
@ -326,7 +325,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.Bl
blkmsgs = append(blkmsgs, bm) blkmsgs = append(blkmsgs, bm)
} }
return sm.ApplyBlocks(ctx, pstate, blkmsgs, abi.ChainEpoch(blks[0].Height), r, cb) return sm.ApplyBlocks(ctx, pstate, blkmsgs, blks[0].Height, r, cb)
} }
func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid { func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {
@ -382,8 +381,8 @@ func (sm *StateManager) LoadActorState(ctx context.Context, a address.Address, o
cst := cbor.NewCborStore(sm.cs.Blockstore()) cst := cbor.NewCborStore(sm.cs.Blockstore())
if err := cst.Get(ctx, act.Head, out); err != nil { if err := cst.Get(ctx, act.Head, out); err != nil {
var r cbg.Deferred var r cbg.Deferred
cst.Get(ctx, act.Head, &r) _ = cst.Get(ctx, act.Head, &r)
fmt.Printf("badhead %x\n", r.Raw) log.Errorw("bad actor head", "error", err, "raw", r.Raw, "address", a)
return nil, err return nil, err
} }
@ -405,8 +404,8 @@ func (sm *StateManager) LoadActorStateRaw(ctx context.Context, a address.Address
return act, nil return act, nil
} }
// Similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses. Uses the `TipSet` `ts` // ResolveToKeyAddress is similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses.
// to generate the VM state. // Uses the `TipSet` `ts` to generate the VM state.
func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
switch addr.Protocol() { switch addr.Protocol() {
case address.BLS, address.SECP256K1: case address.BLS, address.SECP256K1:
@ -480,7 +479,10 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T
return r, nil return r, nil
} }
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, error) { // WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already
// happened. It guarantees that the message has been on chain for at least confidence epochs without being reverted
// before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.TipSet, *types.MessageReceipt, error) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
@ -528,6 +530,11 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid) (*type
close(backSearchWait) close(backSearchWait)
}() }()
var candidateTs *types.TipSet
var candidateRcp *types.MessageReceipt
heightOfHead := head[0].Val.Height()
reverts := map[types.TipSetKey]bool{}
for { for {
select { select {
case notif, ok := <-tsub: case notif, ok := <-tsub:
@ -537,21 +544,44 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid) (*type
for _, val := range notif { for _, val := range notif {
switch val.Type { switch val.Type {
case store.HCRevert: case store.HCRevert:
continue if val.Val.Equals(candidateTs) {
candidateTs = nil
candidateRcp = nil
}
if backSearchWait != nil {
reverts[val.Val.Key()] = true
}
case store.HCApply: case store.HCApply:
if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) {
return candidateTs, candidateRcp, nil
}
r, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) r, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
if r != nil { if r != nil {
return val.Val, r, nil if confidence == 0 {
return val.Val, r, err
} }
candidateTs = val.Val
candidateRcp = r
}
heightOfHead = val.Val.Height()
} }
} }
case <-backSearchWait: case <-backSearchWait:
if backTs != nil { // check if we found the message in the chain and that is hasn't been reverted since we started searching
if backTs != nil && !reverts[backTs.Key()] {
// if head is at or past confidence interval, return immediately
if heightOfHead >= backTs.Height()+abi.ChainEpoch(confidence) {
return backTs, backRcp, nil return backTs, backRcp, nil
} }
// wait for confidence interval
candidateTs = backTs
candidateRcp = backRcp
}
reverts = nil
backSearchWait = nil backSearchWait = nil
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, ctx.Err() return nil, nil, ctx.Err()
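A minimal sketch of calling the reworked wait with a confidence interval (assumptions: `sm` is the *StateManager, `mcid` is the CID of a message already submitted, fmt is imported, and the confidence value is illustrative):

	ts, rec, err := sm.WaitForMessage(ctx, mcid, 5) // return only after 5 epochs of confidence
	if err != nil {
		return err
	}
	fmt.Printf("message executed at height %d, exit code %d\n", ts.Height(), rec.ExitCode)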

View File

@ -13,6 +13,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
amt "github.com/filecoin-project/go-amt-ipld/v2" amt "github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
@ -37,7 +38,7 @@ func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.N
var state init_.State var state init_.State
_, err := sm.LoadActorStateRaw(ctx, builtin.InitActorAddr, &state, st) _, err := sm.LoadActorStateRaw(ctx, builtin.InitActorAddr, &state, st)
if err != nil { if err != nil {
return "", xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) return "", xerrors.Errorf("(get sset) failed to load init actor state: %w", err)
} }
return dtypes.NetworkName(state.NetworkName), nil return dtypes.NetworkName(state.NetworkName), nil
@ -138,6 +139,24 @@ func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address,
return *i, nil return *i, nil
} }
func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err)
}
sectorInfo, ok, err := mas.GetSector(sm.cs.Store(ctx), sid)
if err != nil {
return nil, err
}
if !ok {
return nil, xerrors.New("sector not found")
}
return sectorInfo, nil
}
func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) { func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) {
var mas miner.State var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts) _, err := sm.LoadActorState(ctx, maddr, &mas, ts)
@ -155,13 +174,34 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("(get sectors) failed to load miner actor state: %w", err) return nil, xerrors.Errorf("(get sectors) failed to load miner actor state: %w", err)
} }
// TODO: Optimization: we could avoid loaditg the whole proving set here if we had AMT.GetNth with bitfield filtering cst := cbor.NewCborStore(sm.cs.Blockstore())
sectorSet, err := GetProvingSetRaw(ctx, sm, mas) var deadlines miner.Deadlines
if err != nil { if err := cst.Get(ctx, mas.Deadlines, &deadlines); err != nil {
return nil, xerrors.Errorf("getting proving set: %w", err) return nil, xerrors.Errorf("failed to load deadlines: %w", err)
} }
if len(sectorSet) == 0 { notProving, err := abi.BitFieldUnion(mas.Faults, mas.Recoveries)
if err != nil {
return nil, xerrors.Errorf("failed to union faults and recoveries: %w", err)
}
allSectors, err := bitfield.MultiMerge(append(deadlines.Due[:], mas.NewSectors)...)
if err != nil {
return nil, xerrors.Errorf("merging deadline bitfields failed: %w", err)
}
provingSectors, err := bitfield.SubtractBitField(allSectors, notProving)
if err != nil {
return nil, xerrors.Errorf("failed to subtract non-proving sectors from set: %w", err)
}
numProvSect, err := provingSectors.Count()
if err != nil {
return nil, xerrors.Errorf("failed to count bits: %w", err)
}
// TODO(review): is this right? feels fishy to me
if numProvSect == 0 {
return nil, nil return nil, nil
} }
@ -180,17 +220,34 @@ func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *S
return nil, xerrors.Errorf("getting miner ID: %w", err) return nil, xerrors.Errorf("getting miner ID: %w", err)
} }
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, uint64(len(sectorSet))) ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect)
if err != nil { if err != nil {
return nil, xerrors.Errorf("generating winning post challenges: %w", err) return nil, xerrors.Errorf("generating winning post challenges: %w", err)
} }
sectors, err := provingSectors.All(miner.SectorsMax)
if err != nil {
return nil, xerrors.Errorf("failed to enumerate all sector IDs: %w", err)
}
sectorAmt, err := amt.LoadAMT(ctx, cst, mas.Sectors)
if err != nil {
return nil, xerrors.Errorf("failed to load sectors amt: %w", err)
}
out := make([]abi.SectorInfo, len(ids)) out := make([]abi.SectorInfo, len(ids))
for i, n := range ids { for i, n := range ids {
sid := sectors[n]
var sinfo miner.SectorOnChainInfo
if err := sectorAmt.Get(ctx, sid, &sinfo); err != nil {
return nil, xerrors.Errorf("failed to get sector %d: %w", sid, err)
}
out[i] = abi.SectorInfo{ out[i] = abi.SectorInfo{
RegisteredProof: wpt, SealProof: spt,
SectorNumber: sectorSet[n].ID, SectorNumber: sinfo.Info.SectorNumber,
SealedCID: sectorSet[n].Info.Info.SealedCID, SealedCID: sinfo.Info.SealedCID,
} }
} }
@ -268,7 +325,7 @@ func GetMinerRecoveries(ctx context.Context, sm *StateManager, ts *types.TipSet,
return mas.Recoveries, nil return mas.Recoveries, nil
} }
func GetStorageDeal(ctx context.Context, sm *StateManager, dealId abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) { func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) {
var state market.State var state market.State
if _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil { if _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil {
return nil, err return nil, err
@ -280,7 +337,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealId abi.DealID, ts
} }
var dp market.DealProposal var dp market.DealProposal
if err := da.Get(ctx, uint64(dealId), &dp); err != nil { if err := da.Get(ctx, uint64(dealID), &dp); err != nil {
return nil, err return nil, err
} }
@ -289,7 +346,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealId abi.DealID, ts
return nil, err return nil, err
} }
st, found, err := sa.Get(dealId) st, found, err := sa.Get(dealID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
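A toy walk-through of the proving-set arithmetic introduced in GetSectorsForWinningPoSt above; MultiMerge, SubtractBitField and Count are the calls from the diff, while bitfield.NewFromSet and the sector numbers are assumptions used only for illustration:

	due0 := bitfield.NewFromSet([]uint64{1, 2, 3})    // sectors due in one deadline
	newSec := bitfield.NewFromSet([]uint64{4, 5})     // mas.NewSectors
	all, _ := bitfield.MultiMerge(due0, newSec)       // {1, 2, 3, 4, 5}
	notProving := bitfield.NewFromSet([]uint64{2, 5}) // Faults ∪ Recoveries
	proving, _ := bitfield.SubtractBitField(all, notProving)
	n, _ := proving.Count() // 3: only sectors 1, 3 and 4 can be challenged for the PoSt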

View File

@ -32,8 +32,11 @@ func (fts *FullTipSet) Cids() []cid.Cid {
return cids return cids
} }
// TipSet returns a narrower view of this FullTipSet, eliding the block
// messages.
func (fts *FullTipSet) TipSet() *types.TipSet { func (fts *FullTipSet) TipSet() *types.TipSet {
if fts.tipset != nil { if fts.tipset != nil {
// FIXME: fts.tipset is actually never set. Should it memoize?
return fts.tipset return fts.tipset
} }

View File

@ -34,7 +34,7 @@ type lbEntry struct {
target types.TipSetKey target types.TipSetKey
} }
func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) {
if from.Height()-to <= ci.skipLength { if from.Height()-to <= ci.skipLength {
return ci.walkBack(from, to) return ci.walkBack(from, to)
} }
@ -91,15 +91,16 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
return nil, err return nil, err
} }
if parent.Height() > rheight {
return nil, xerrors.Errorf("cache is inconsistent")
}
rheight -= ci.skipLength rheight -= ci.skipLength
skipTarget, err := ci.walkBack(parent, rheight) var skipTarget *types.TipSet
if parent.Height() < rheight {
skipTarget = parent
} else {
skipTarget, err = ci.walkBack(parent, rheight)
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("fillCache walkback: %w", err)
}
} }
lbe := &lbEntry{ lbe := &lbEntry{
@ -113,8 +114,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
return lbe, nil return lbe, nil
} }
// floors to nearest skipLength multiple
func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch {
return abi.ChainEpoch(h/ci.skipLength) * ci.skipLength return (h / ci.skipLength) * ci.skipLength
} }
func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) {
@ -146,6 +148,8 @@ func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.Ti
} }
if to > pts.Height() { if to > pts.Height() {
// in case pts is lower than the epoch we're looking for (null blocks)
// return a tipset above that height
return ts, nil return ts, nil
} }
if to == pts.Height() { if to == pts.Height() {

chain/store/index_test.go (new file, 80 lines)
View File

@ -0,0 +1,80 @@
package store_test
import (
"bytes"
"context"
"testing"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/specs-actors/actors/abi"
datastore "github.com/ipfs/go-datastore"
syncds "github.com/ipfs/go-datastore/sync"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"github.com/stretchr/testify/assert"
)
func TestIndexSeeks(t *testing.T) {
cg, err := gen.NewGenerator()
if err != nil {
t.Fatal(err)
}
gencar, err := cg.GenesisCar()
if err != nil {
t.Fatal(err)
}
gen := cg.Genesis()
ctx := context.TODO()
nbs := blockstore.NewBlockstore(syncds.MutexWrap(datastore.NewMapDatastore()))
cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil)
_, err = cs.Import(bytes.NewReader(gencar))
if err != nil {
t.Fatal(err)
}
cur := mock.TipSet(gen)
if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
t.Fatal(err)
}
cs.SetGenesis(gen)
// Put 113 blocks from genesis
for i := 0; i < 113; i++ {
nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))
if err := cs.PutTipSet(ctx, nextts); err != nil {
t.Fatal(err)
}
cur = nextts
}
// Put 50 null epochs + 1 block
skip := mock.MkBlock(cur, 1, 1)
skip.Height += 50
skipts := mock.TipSet(skip)
if err := cs.PutTipSet(ctx, skipts); err != nil {
t.Fatal(err)
}
ts, err := cs.GetTipsetByHeight(ctx, skip.Height-10, skipts, false)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, abi.ChainEpoch(164), ts.Height())
for i := 0; i <= 113; i++ {
ts3, err := cs.GetTipsetByHeight(ctx, abi.ChainEpoch(i), skipts, false)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, abi.ChainEpoch(i), ts3.Height())
}
}

View File

@ -47,7 +47,20 @@ import (
var log = logging.Logger("chainstore") var log = logging.Logger("chainstore")
var chainHeadKey = dstore.NewKey("head") var chainHeadKey = dstore.NewKey("head")
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
// latest head tipset references) being tracked in the Datastore (key-value
// store).
//
// To alleviate disk access, the ChainStore has two ARC caches:
// 1. a tipset cache
// 2. a block => messages references cache.
type ChainStore struct { type ChainStore struct {
bs bstore.Blockstore bs bstore.Blockstore
ds dstore.Datastore ds dstore.Datastore
@ -64,7 +77,7 @@ type ChainStore struct {
cindex *ChainIndex cindex *ChainIndex
reorgCh chan<- reorg reorgCh chan<- reorg
headChangeNotifs []func(rev, app []*types.TipSet) error reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache mmCache *lru.ARCCache
tsCache *lru.ARCCache tsCache *lru.ARCCache
@ -89,8 +102,6 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Sys
cs.cindex = ci cs.cindex = ci
cs.reorgCh = cs.reorgWorker(context.TODO())
hcnf := func(rev, app []*types.TipSet) error { hcnf := func(rev, app []*types.TipSet) error {
cs.pubLk.Lock() cs.pubLk.Lock()
defer cs.pubLk.Unlock() defer cs.pubLk.Unlock()
@ -122,7 +133,8 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Sys
return nil return nil
} }
cs.headChangeNotifs = append(cs.headChangeNotifs, hcnf, hcmetric) cs.reorgNotifeeCh = make(chan ReorgNotifee)
cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric})
return cs return cs
} }
@ -211,8 +223,24 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
return out return out
} }
func (cs *ChainStore) SubscribeHeadChanges(f func(rev, app []*types.TipSet) error) { func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.headChangeNotifs = append(cs.headChangeNotifs, f) cs.reorgNotifeeCh <- f
}
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.ds.Has(key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.ds.Put(key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
} }
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
@ -247,6 +275,9 @@ func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
return nil return nil
} }
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
// internal state as our new head, if and only if it is heavier than the current
// head.
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
cs.heaviestLk.Lock() cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock() defer cs.heaviestLk.Unlock()
@ -273,13 +304,19 @@ type reorg struct {
new *types.TipSet new *types.TipSet
} }
func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg { func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg {
out := make(chan reorg, 32) out := make(chan reorg, 32)
notifees := make([]ReorgNotifee, len(initialNotifees))
copy(notifees, initialNotifees)
go func() { go func() {
defer log.Warn("reorgWorker quit") defer log.Warn("reorgWorker quit")
for { for {
select { select {
case n := <-cs.reorgNotifeeCh:
notifees = append(notifees, n)
case r := <-out: case r := <-out:
revert, apply, err := cs.ReorgOps(r.old, r.new) revert, apply, err := cs.ReorgOps(r.old, r.new)
if err != nil { if err != nil {
@ -293,7 +330,7 @@ func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg {
apply[i], apply[opp] = apply[opp], apply[i] apply[i], apply[opp] = apply[opp], apply[i]
} }
for _, hcf := range cs.headChangeNotifs { for _, hcf := range notifees {
if err := hcf(revert, apply); err != nil { if err := hcf(revert, apply); err != nil {
log.Error("head change func errored (BAD): ", err) log.Error("head change func errored (BAD): ", err)
} }
@ -306,6 +343,9 @@ func (cs *ChainStore) reorgWorker(ctx context.Context) chan<- reorg {
return out return out
} }
// takeHeaviestTipSet actually sets the incoming tipset as our head both in
// memory and in the ChainStore. It also sends a notification to deliver to
// ReorgNotifees.
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error { func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet") _, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End() defer span.End()
@ -343,6 +383,7 @@ func (cs *ChainStore) SetHead(ts *types.TipSet) error {
return cs.takeHeaviestTipSet(context.TODO(), ts) return cs.takeHeaviestTipSet(context.TODO(), ts)
} }
// Contains returns whether our BlockStore has all blocks in the supplied TipSet.
func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
for _, c := range ts.Cids() { for _, c := range ts.Cids() {
has, err := cs.bs.Has(c) has, err := cs.bs.Has(c)
@ -357,6 +398,8 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) {
return true, nil return true, nil
} }
// GetBlock fetches a BlockHeader with the supplied CID. It returns
// blockstore.ErrNotFound if the block was not found in the BlockStore.
func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) {
sb, err := cs.bs.Get(c) sb, err := cs.bs.Get(c)
if err != nil { if err != nil {
@ -392,7 +435,7 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
return ts, nil return ts, nil
} }
// returns true if 'a' is an ancestor of 'b' // IsAncestorOf returns true if 'a' is an ancestor of 'b'
func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) {
if b.Height() <= a.Height() { if b.Height() <= a.Height() {
return false, nil return false, nil
@ -449,6 +492,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
return leftChain, rightChain, nil return leftChain, rightChain, nil
} }
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet { func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet {
cs.heaviestLk.Lock() cs.heaviestLk.Lock()
defer cs.heaviestLk.Unlock() defer cs.heaviestLk.Unlock()
@ -900,39 +944,50 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
return nil, xerrors.Errorf("deriving randomness: %w", err) return nil, xerrors.Errorf("deriving randomness: %w", err)
} }
VRFDigest := blake2b.Sum256(rbase) VRFDigest := blake2b.Sum256(rbase)
h.Write(VRFDigest[:]) _, err := h.Write(VRFDigest[:])
if err != nil {
return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
}
if err := binary.Write(h, binary.BigEndian, round); err != nil { if err := binary.Write(h, binary.BigEndian, round); err != nil {
return nil, xerrors.Errorf("deriving randomness: %w", err) return nil, xerrors.Errorf("deriving randomness: %w", err)
} }
h.Write(entropy) _, err = h.Write(entropy)
if err != nil {
return nil, xerrors.Errorf("hashing entropy: %w", err)
}
return h.Sum(nil), nil return h.Sum(nil), nil
} }
func (cs *ChainStore) GetRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) (out []byte, err error) { func (cs *ChainStore) GetRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetRandomness") _, span := trace.StartSpan(ctx, "store.GetRandomness")
defer span.End() defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round))) span.AddAttributes(trace.Int64Attribute("round", int64(round)))
//defer func() { ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
//log.Infof("getRand %v %d %d %x -> %x", blks, pers, round, entropy, out)
//}()
for {
nts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
if err != nil { if err != nil {
return nil, err return nil, err
} }
mtb := nts.MinTicketBlock() if round > ts.Height() {
return nil, xerrors.Errorf("cannot draw randomness from the future")
}
searchHeight := round
if searchHeight < 0 {
searchHeight = 0
}
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
if err != nil {
return nil, err
}
mtb := randTs.MinTicketBlock()
// if at (or just past -- for null epochs) appropriate epoch // if at (or just past -- for null epochs) appropriate epoch
// or at genesis (works for negative epochs) // or at genesis (works for negative epochs)
if nts.Height() <= round || mtb.Height == 0 { return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy)
return DrawRandomness(nts.MinTicketBlock().Ticket.VRFProof, pers, round, entropy)
}
blks = mtb.Parents
}
} }
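The rewritten GetRandomness resolves the tipset at the requested epoch (refusing future rounds and clamping negative rounds to zero) and hands its minimum-ticket VRF proof to DrawRandomness. A rough, self-contained sketch of that blake2b-based derivation, assuming golang.org/x/crypto/blake2b and treating the personalization tag as a plain int64:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

// drawRandomnessSketch is illustrative only; field order and tag encoding are
// assumptions, not the canonical implementation.
func drawRandomnessSketch(ticketVRFProof []byte, pers int64, round int64, entropy []byte) ([]byte, error) {
	h, err := blake2b.New256(nil)
	if err != nil {
		return nil, err
	}
	// Domain-separate by the personalization tag first.
	if err := binary.Write(h, binary.BigEndian, pers); err != nil {
		return nil, err
	}
	// Mix in the digest of the ticket's VRF proof rather than the raw proof.
	digest := blake2b.Sum256(ticketVRFProof)
	if _, err := h.Write(digest[:]); err != nil {
		return nil, err
	}
	// Bind the randomness to the requested epoch.
	if err := binary.Write(h, binary.BigEndian, round); err != nil {
		return nil, err
	}
	// Finally mix in the caller-supplied entropy.
	if _, err := h.Write(entropy); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

func main() {
	out, _ := drawRandomnessSketch([]byte("vrf proof"), 2, 164, []byte("miner addr"))
	fmt.Printf("%x\n", out)
}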
// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given // GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given
@ -1149,7 +1204,6 @@ func (cr *chainRand) GetRandomness(ctx context.Context, pers crypto.DomainSepara
func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
if tsk.IsEmpty() { if tsk.IsEmpty() {
return cs.GetHeaviestTipSet(), nil return cs.GetHeaviestTipSet(), nil
} else {
return cs.LoadTipSet(tsk)
} }
return cs.LoadTipSet(tsk)
} }

View File

@ -22,8 +22,8 @@ import (
) )
func init() { func init() {
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
power.ConsensusMinerMinPower = big.NewInt(2048) power.ConsensusMinerMinPower = big.NewInt(2048)
verifreg.MinVerifiedDealSize = big.NewInt(256) verifreg.MinVerifiedDealSize = big.NewInt(256)
@ -55,7 +55,7 @@ func BenchmarkGetRandomness(b *testing.B) {
b.Fatal(err) b.Fatal(err)
} }
bds, err := lr.Datastore("/blocks") bds, err := lr.Datastore("/chain")
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -57,8 +57,7 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha
return return
} }
//nolint:golint src := msg.GetFrom()
src := peer.ID(msg.GetFrom())
go func() { go func() {
start := time.Now() start := time.Now()
@ -204,7 +203,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
} }
} }
err = sigs.CheckBlockSignature(blk.Header, ctx, key) err = sigs.CheckBlockSignature(ctx, blk.Header, key)
if err != nil { if err != nil {
log.Errorf("block signature verification failed: %s", err) log.Errorf("block signature verification failed: %s", err)
recordFailure("signature_verification_failed") recordFailure("signature_verification_failed")

View File

@ -8,7 +8,6 @@ import (
"os" "os"
"sort" "sort"
"strings" "strings"
"sync"
"time" "time"
"github.com/Gurpartap/async" "github.com/Gurpartap/async"
@ -50,10 +49,37 @@ import (
"github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics"
) )
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on system time are quickly rejected.
const MaxHeightDrift = 5
var log = logging.Logger("chain") var log = logging.Logger("chain")
var LocalIncoming = "incoming" var LocalIncoming = "incoming"
// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//
// * Fast-forwards the chain as it learns of new TipSets from the network via
// the SyncManager.
// * Applies the fork choice rule to select the correct side when confronted
// with a fork in the network.
// * Requests block headers and messages from other peers when not available
// in our BlockStore.
// * Tracks blocks marked as bad in a cache.
// * Keeps the BlockStore and ChainStore consistent with our view of the world,
// the latter of which in turn informs other components when a reorg has been
// committed.
//
// The Syncer does not run workers itself. It's mainly concerned with
// ensuring a consistent state of chain consensus. The reactive and network-
// interfacing processes are part of other components, such as the SyncManager
// (which owns the sync scheduler and sync workers), BlockSync, the HELLO
// protocol, and the gossipsub block propagation layer.
//
// {hint/concept} The fork-choice rule as it currently stands is: "pick the
// chain with the heaviest weight, so long as it hasn't deviated one finality
// threshold from our head (900 epochs, parameter determined by spec-actors)".
type Syncer struct { type Syncer struct {
// The interface for accessing and putting tipsets into local storage // The interface for accessing and putting tipsets into local storage
store *store.ChainStore store *store.ChainStore
@ -86,6 +112,7 @@ type Syncer struct {
verifier ffiwrapper.Verifier verifier ffiwrapper.Verifier
} }
// NewSyncer creates a new Syncer object.
func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) { func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) {
gen, err := sm.ChainStore().GetGenesis() gen, err := sm.ChainStore().GetGenesis()
if err != nil { if err != nil {
@ -134,6 +161,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return false return false
} }
if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
return false
}
for _, b := range fts.Blocks { for _, b := range fts.Blocks {
if reason, ok := syncer.bad.Has(b.Cid()); ok { if reason, ok := syncer.bad.Has(b.Cid()); ok {
log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason) log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason)
@ -183,6 +215,11 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return true return true
} }
// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to
// receive new block headers as they arrive from the network, and sends them to
// the returned channel.
//
// These blocks have not necessarily been incorporated into our view of the chain.

func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
sub := syncer.incoming.Sub(LocalIncoming) sub := syncer.incoming.Sub(LocalIncoming)
out := make(chan *types.BlockHeader, 10) out := make(chan *types.BlockHeader, 10)
@ -210,11 +247,15 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe
return out, nil return out, nil
} }
// ValidateMsgMeta performs structural and content hash validation of the
// messages within this block. If validation passes, it stores the messages in
// the underlying IPLD block store.
func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit {
return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc)
} }
// Collect the CIDs of both types of messages separately: BLS and Secpk.
var bcids, scids []cbg.CBORMarshaler var bcids, scids []cbg.CBORMarshaler
for _, m := range fblk.BlsMessages { for _, m := range fblk.BlsMessages {
c := cbg.CborCid(m.Cid()) c := cbg.CborCid(m.Cid())
@ -232,11 +273,14 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error {
blockstore := syncer.store.Blockstore() blockstore := syncer.store.Blockstore()
bs := cbor.NewCborStore(blockstore) bs := cbor.NewCborStore(blockstore)
// Compute the root CID of the combined message trie.
smroot, err := computeMsgMeta(bs, bcids, scids) smroot, err := computeMsgMeta(bs, bcids, scids)
if err != nil { if err != nil {
return xerrors.Errorf("validating msgmeta, compute failed: %w", err) return xerrors.Errorf("validating msgmeta, compute failed: %w", err)
} }
// Check that the message trie root matches with what's in the block.
if fblk.Header.Messages != smroot { if fblk.Header.Messages != smroot {
return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot)
} }
@ -346,6 +390,8 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
return fts, nil return fts, nil
} }
// computeMsgMeta computes the root CID of the combined arrays of message CIDs
// of both types (BLS and Secpk).
func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) { func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) {
ctx := context.TODO() ctx := context.TODO()
bmroot, err := amt.FromArray(ctx, bs, bmsgCids) bmroot, err := amt.FromArray(ctx, bs, bmsgCids)
@ -369,14 +415,24 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (
return mrcid, nil return mrcid, nil
} }
// FetchTipSet tries to load the provided tipset from the store, and falls back
// to the network (BlockSync) by querying the supplied peer if not found
// locally.
//
// {hint/usage} This is used from the HELLO protocol, to fetch the greeting
// peer's heaviest tipset if we don't have it.
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) {
if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil {
return fts, nil return fts, nil
} }
// fall back to the network.
return syncer.Bsync.GetFullTipSet(ctx, p, tsk) return syncer.Bsync.GetFullTipSet(ctx, p, tsk)
} }
// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full
// representation of it containing FullBlocks. If any of the blocks is not
// found locally, it errors entirely with blockstore.ErrNotFound.
func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) {
ts, err := syncer.store.LoadTipSet(tsk) ts, err := syncer.store.LoadTipSet(tsk)
if err != nil { if err != nil {
@ -401,6 +457,12 @@ func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet,
return fts, nil return fts, nil
} }
// Sync tries to advance our view of the chain to `maybeHead`. It does nothing
// if our current head is heavier than the requested tipset, or if we're already
// at the requested head, or if the head is the genesis.
//
// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the
// godocs on that method for a more detailed view.
func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "chain.Sync") ctx, span := trace.StartSpan(ctx, "chain.Sync")
defer span.End() defer span.End()
@ -467,7 +529,11 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
return nil return nil
} }
var futures []async.ErrorFuture
for _, b := range fts.Blocks { for _, b := range fts.Blocks {
b := b // rebind to a scoped variable
futures = append(futures, async.Err(func() error {
if err := syncer.ValidateBlock(ctx, b); err != nil { if err := syncer.ValidateBlock(ctx, b); err != nil {
if isPermanent(err) { if isPermanent(err) {
syncer.bad.Add(b.Cid(), err.Error()) syncer.bad.Add(b.Cid(), err.Error())
@ -478,6 +544,13 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet)
if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil {
return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err)
} }
return nil
}))
}
for _, f := range futures {
if err := f.AwaitContext(ctx); err != nil {
return err
}
} }
return nil return nil
} }
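ValidateTipSet now validates a tipset's blocks concurrently and only returns after awaiting every future. The diff uses the Gurpartap/async helper; an equivalent sketch of the same fan-out/fan-in pattern with golang.org/x/sync/errgroup (illustration only, not the Lotus code):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type block struct{ id string }

func validateBlock(ctx context.Context, b *block) error {
	// placeholder for the expensive per-block validation
	return nil
}

func validateTipSet(ctx context.Context, blocks []*block) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, b := range blocks {
		b := b // rebind to a scoped variable, as in the diff
		g.Go(func() error { return validateBlock(ctx, b) })
	}
	// Wait returns the first non-nil error produced by any goroutine.
	return g.Wait()
}

func main() {
	err := validateTipSet(context.Background(), []*block{{"a"}, {"b"}})
	fmt.Println(err)
}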
@ -528,8 +601,25 @@ func blockSanityChecks(h *types.BlockHeader) error {
return nil return nil
} }
// Should match up with 'Semantical Validation' in validation.md in the spec // ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) error { func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("validate block panic: %w", rerr)
return
}
}()
isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
if err != nil {
return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
}
if isValidated {
return nil
}
validationStart := time.Now() validationStart := time.Now()
defer func() { defer func() {
dur := time.Since(validationStart) dur := time.Since(validationStart)
@ -576,18 +666,18 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
// fast checks first // fast checks first
now := uint64(time.Now().Unix()) now := uint64(time.Now().Unix())
if h.Timestamp > now+build.AllowableClockDrift { if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal) return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
} }
if h.Timestamp > now { if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, time.Now().Unix()) log.Warn("Got block from the future, but within threshold", h.Timestamp, time.Now().Unix())
} }
if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelay*uint64(h.Height-baseTs.Height())) { if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelaySecs*uint64(h.Height-baseTs.Height())) {
log.Warn("timestamp funtimes: ", h.Timestamp, baseTs.MinTimestamp(), h.Height, baseTs.Height()) log.Warn("timestamp funtimes: ", h.Timestamp, baseTs.MinTimestamp(), h.Height, baseTs.Height())
diff := (baseTs.MinTimestamp() + (build.BlockDelay * uint64(h.Height-baseTs.Height()))) - h.Timestamp diff := (baseTs.MinTimestamp() + (build.BlockDelaySecs * uint64(h.Height-baseTs.Height()))) - h.Timestamp
return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d; diff %d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelay, h.Height-baseTs.Height(), diff) return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d; diff %d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelaySecs, h.Height-baseTs.Height(), diff)
} }
msgsCheck := async.Err(func() error { msgsCheck := async.Err(func() error {
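The two timestamp checks above bound a block's timestamp from both sides: it may be no more than AllowableClockDriftSecs ahead of local time, and no earlier than BlockDelaySecs per elapsed epoch past the parent tipset's minimum timestamp. A compact sketch with the constants passed in, since they are network parameters rather than values fixed here:

package main

import (
	"errors"
	"fmt"
	"time"
)

// checkBlockTimestamp is an illustrative stand-in for the checks above.
func checkBlockTimestamp(blkTs, baseMinTs uint64, deltaH, blockDelaySecs, clockDriftSecs uint64) error {
	now := uint64(time.Now().Unix())
	if blkTs > now+clockDriftSecs {
		return errors.New("block was from the future")
	}
	if blkTs < baseMinTs+blockDelaySecs*deltaH {
		return errors.New("block was generated too soon")
	}
	return nil
}

func main() {
	base := uint64(time.Now().Unix()) - 60
	// one epoch after the parent, assuming a 30-second block delay for illustration
	fmt.Println(checkBlockTimestamp(base+30, base, 1, 30, 1))
}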
@ -650,7 +740,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
return xerrors.Errorf("could not draw randomness: %w", err) return xerrors.Errorf("could not draw randomness: %w", err)
} }
if err := gen.VerifyVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil { if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err) return xerrors.Errorf("validating block election proof failed: %w", err)
} }
@ -676,7 +766,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
}) })
blockSigCheck := async.Err(func() error { blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(h, ctx, waddr); err != nil { if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err) return xerrors.Errorf("check block signature failed: %w", err)
} }
return nil return nil
@ -711,7 +801,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err) return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
} }
err = gen.VerifyVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof) err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil { if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err) return xerrors.Errorf("validating block tickets failed: %w", err)
} }
@ -759,7 +849,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
} }
} }
return merr if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
}
return nil
} }
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
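With the validation cache above, ValidateBlock becomes idempotent: it consults IsBlockValidated before doing any work and calls MarkBlockAsValidated only once validation fully succeeds, so re-validating a known CID is a single datastore lookup. A minimal check-then-mark sketch over an in-memory map (illustrative, not the Lotus datastore API):

package main

import "fmt"

type validationCache struct {
	seen map[string]struct{}
}

func (c *validationCache) isValidated(cid string) bool {
	_, ok := c.seen[cid]
	return ok
}

func (c *validationCache) markValidated(cid string) { c.seen[cid] = struct{}{} }

func validate(c *validationCache, cid string, expensive func() error) error {
	if c.isValidated(cid) {
		return nil // already fully validated once; skip the expensive path
	}
	if err := expensive(); err != nil {
		return err // do NOT cache failures or partial validation
	}
	c.markValidated(cid)
	return nil
}

func main() {
	c := &validationCache{seen: map[string]struct{}{}}
	work := func() error { fmt.Println("validating..."); return nil }
	_ = validate(c, "bafy...", work) // runs the expensive validation
	_ = validate(c, "bafy...", work) // cache hit, no output
}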
@ -857,7 +951,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
// Phase 1: syntactic validation, as defined in the spec // Phase 1: syntactic validation, as defined in the spec
minGas := vm.PricelistByEpoch(baseTs.Height()).OnChainMessage(msg.ChainLength()) minGas := vm.PricelistByEpoch(baseTs.Height()).OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas); err != nil { if err := m.ValidForBlockInclusion(minGas.Total()); err != nil {
return err return err
} }
@ -949,23 +1043,14 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat
trace.Int64Attribute("msgCount", int64(len(msgs))), trace.Int64Attribute("msgCount", int64(len(msgs))),
) )
var wg sync.WaitGroup bmsgs := make([]bls.Message, len(msgs))
for i, m := range msgs {
digests := make([]bls.Digest, len(msgs)) bmsgs[i] = m.Bytes()
for i := 0; i < 10; i++ {
wg.Add(1)
go func(w int) {
defer wg.Done()
for j := 0; (j*10)+w < len(msgs); j++ {
digests[j*10+w] = bls.Hash(bls.Message(msgs[j*10+w].Bytes()))
} }
}(i)
}
wg.Wait()
var bsig bls.Signature var bsig bls.Signature
copy(bsig[:], sig.Data) copy(bsig[:], sig.Data)
if !bls.Verify(&bsig, digests, pubks) { if !bls.HashVerify(&bsig, bmsgs, pubks) {
return xerrors.New("bls aggregate signature failed to verify") return xerrors.New("bls aggregate signature failed to verify")
} }
@ -982,6 +1067,39 @@ func extractSyncState(ctx context.Context) *SyncerState {
return nil return nil
} }
// collectHeaders collects the headers from the blocks between any two tipsets.
//
// `from` is the heaviest/projected/target tipset we have learned about, and
// `to` is usually an anchor tipset we already have in our view of the chain
// (which could be the genesis).
//
// collectHeaders checks if portions of the chain are in our ChainStore; it falls
// back to the network to retrieve the missing parts. If, during the process, any
// portion we receive is in our denylist (bad list), we short-circuit.
//
// {hint/naming}: `from` and `to` are in inverse order. `from` is the highest,
// and `to` is the lowest. This method traverses the chain backwards.
//
// {hint/usage}: This is used by collectChain, which is in turn called from the
// main Sync method (Syncer#Sync), so it's a pretty central method.
//
// {hint/logic}: The logic of this method is as follows:
//
// 1. Check that the from tipset is not linked to a parent block known to be
// bad.
// 2. Check the consistency of beacon entries in the from tipset. We check
// total equality of the BeaconEntries in each block.
// 3. Traverse the chain backwards. For each tipset:
// 3a. Load it from the chainstore; if found, move on to its parent.
// 3b. Query our peers via BlockSync in batches, requesting up to a
// maximum of 500 tipsets every time.
//
// Once we've concluded, if we find a mismatching tipset at the height where the
// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork
// to resolve it. Refer to the godocs there.
//
// All throughout the process, we keep checking if the received blocks are in
// the deny list, and short-circuit the process if so.
func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
ctx, span := trace.StartSpan(ctx, "collectHeaders") ctx, span := trace.StartSpan(ctx, "collectHeaders")
defer span.End() defer span.End()
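A schematic rendering of the traversal just described, with local lookups first and windowed network fetches of at most 500 tipsets; the helper signatures below are invented for illustration and null rounds, bad-block checks and beacon validation are omitted:

package main

import "fmt"

type tipset struct {
	height  int
	parents int // height of the parent tipset, -1 for genesis
}

// collectHeadersSketch walks from `from` down to `untilHeight`, preferring the
// local store and falling back to batched network fetches.
func collectHeadersSketch(from tipset, untilHeight int, local func(height int) (tipset, bool), fetch func(height, count int) []tipset) []tipset {
	out := []tipset{from}
	at := from.parents
	for at >= untilHeight && at >= 0 {
		if ts, ok := local(at); ok { // 3a: already stored locally
			out = append(out, ts)
			at = ts.parents
			continue
		}
		window := 500 // 3b: ask peers for up to 500 tipsets at a time
		if gap := at - untilHeight + 1; gap < window {
			window = gap
		}
		for _, ts := range fetch(at, window) {
			out = append(out, ts)
			at = ts.parents
		}
	}
	return out
}

func main() {
	local := func(h int) (tipset, bool) { return tipset{}, false }
	fetch := func(h, n int) []tipset {
		var res []tipset
		for i := 0; i < n; i++ {
			res = append(res, tipset{height: h - i, parents: h - i - 1})
		}
		return res
	}
	hdrs := collectHeadersSketch(tipset{height: 10, parents: 9}, 0, local, fetch)
	fmt.Println(len(hdrs)) // 11
}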
@ -998,6 +1116,8 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to
} }
} }
// Check if the parents of the from block are in the denylist.
// i.e. if a fork of the chain has been requested that we know to be bad.
for _, pcid := range from.Parents().Cids() { for _, pcid := range from.Parents().Cids() {
if reason, ok := syncer.bad.Has(pcid); ok { if reason, ok := syncer.bad.Has(pcid); ok {
markBad("linked to %s", pcid) markBad("linked to %s", pcid)
@ -1068,8 +1188,8 @@ loop:
} }
// NB: GetBlocks validates that the blocks are in-fact the ones we // NB: GetBlocks validates that the blocks are in-fact the ones we
// requested, and that they are correctly linked to eachother. It does // requested, and that they are correctly linked to one another. It does
// not validate any state transitions // not validate any state transitions.
window := 500 window := 500
if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window { if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window {
window = gap window = gap
@ -1110,7 +1230,6 @@ loop:
at = blks[len(blks)-1].Parents() at = blks[len(blks)-1].Parents()
} }
// We have now ascertained that this is *not* a 'fast forward'
if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) { if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) {
last := blockSet[len(blockSet)-1] last := blockSet[len(blockSet)-1]
if last.Parents() == to.Parents() { if last.Parents() == to.Parents() {
@ -1118,6 +1237,8 @@ loop:
return blockSet, nil return blockSet, nil
} }
// We have now ascertained that this is *not* a 'fast forward'
log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height()) log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height())
fork, err := syncer.syncFork(ctx, last, to) fork, err := syncer.syncFork(ctx, last, to)
if err != nil { if err != nil {
@ -1139,6 +1260,12 @@ loop:
var ErrForkTooLong = fmt.Errorf("fork longer than threshold") var ErrForkTooLong = fmt.Errorf("fork longer than threshold")
// syncFork tries to obtain the chain fragment that links a fork into a common
// ancestor in our view of the chain.
//
// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the
// denylist. Otherwise, we find the common ancestor and add the missing chain
// fragment, up to the fork point, to the returned []TipSet.
func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) {
tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold)) tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold))
if err != nil { if err != nil {
@ -1290,6 +1417,25 @@ func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error {
return nil return nil
} }
// collectChain tries to advance our view of the chain to the purported head.
//
// It goes through various stages:
//
// 1. StageHeaders: we proceed in the sync process by requesting block headers
// from our peers, moving back from their heads, until we reach a tipset
// that we have in common (such a common tipset must exist, though it may
// simply be the genesis block).
//
// If the common tipset is our head, we treat the sync as a "fast-forward",
// else we must drop part of our chain to connect to the peer's head
// (referred to as "forking").
//
// 2. StagePersistHeaders: now that we've collected the missing headers,
// augmented by those on the other side of a fork, we persist them to the
// BlockStore.
//
// 3. StageMessages: having acquired the headers and found a common tipset,
// we then move forward, requesting the full blocks, including the messages.
func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error {
ctx, span := trace.StartSpan(ctx, "collectChain") ctx, span := trace.StartSpan(ctx, "collectChain")
defer span.End() defer span.End()
@ -1336,12 +1482,11 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error
return nil return nil
} }
func VerifyElectionPoStVRF(ctx context.Context, evrf []byte, rand []byte, worker address.Address) error { func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
if err := gen.VerifyVRF(ctx, worker, rand, evrf); err != nil { if build.InsecurePoStValidation {
return xerrors.Errorf("failed to verify post_randomness vrf: %w", err)
}
return nil return nil
}
return gen.VerifyVRF(ctx, worker, rand, evrf)
} }
func (syncer *Syncer) State() []SyncerState { func (syncer *Syncer) State() []SyncerState {
@ -1352,6 +1497,7 @@ func (syncer *Syncer) State() []SyncerState {
return out return out
} }
// MarkBad manually adds a block to the "bad blocks" cache.
func (syncer *Syncer) MarkBad(blk cid.Cid) { func (syncer *Syncer) MarkBad(blk cid.Cid) {
syncer.bad.Add(blk, "manually marked bad") syncer.bad.Add(blk, "manually marked bad")
} }
@ -1359,7 +1505,7 @@ func (syncer *Syncer) MarkBad(blk cid.Cid) {
func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
return syncer.bad.Has(blk) return syncer.bad.Has(blk)
} }
func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
cur := ts cur := ts
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
cbe := cur.Blocks()[0].BeaconEntries cbe := cur.Blocks()[0].BeaconEntries
@ -1380,3 +1526,13 @@ func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet
return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset") return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset")
} }
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
g, err := syncer.store.GetGenesis()
if err != nil {
return false
}
now := uint64(time.Now().Unix())
return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift)
}
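As a worked example, assuming a 30-second block delay purely for illustration: a node whose genesis timestamp lies 3,000 seconds in the past computes a theoretical maximum height of 3000/30 = 100 epochs, so with MaxHeightDrift = 5 any incoming tipset claiming a height above 105 is rejected in InformNewHead before any further sync work is done.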

View File

@ -34,8 +34,8 @@ import (
func init() { func init() {
build.InsecurePoStValidation = true build.InsecurePoStValidation = true
os.Setenv("TRUST_PARAMS", "1") os.Setenv("TRUST_PARAMS", "1")
miner.SupportedProofTypes = map[abi.RegisteredProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredProof_StackedDRG2KiBSeal: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
power.ConsensusMinerMinPower = big.NewInt(2048) power.ConsensusMinerMinPower = big.NewInt(2048)
verifreg.MinVerifiedDealSize = big.NewInt(256) verifreg.MinVerifiedDealSize = big.NewInt(256)
@ -408,7 +408,7 @@ func TestSyncBadTimestamp(t *testing.T) {
base := tu.g.CurTipset base := tu.g.CurTipset
tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 { tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 {
return pts.MinTimestamp() + (build.BlockDelay / 2) return pts.MinTimestamp() + (build.BlockDelaySecs / 2)
} }
fmt.Println("BASE: ", base.Cids()) fmt.Println("BASE: ", base.Cids())
@ -417,7 +417,7 @@ func TestSyncBadTimestamp(t *testing.T) {
a1 := tu.mineOnBlock(base, 0, nil, false, true) a1 := tu.mineOnBlock(base, 0, nil, false, true)
tu.g.Timestamper = nil tu.g.Timestamper = nil
tu.g.ResyncBankerNonce(a1.TipSet()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
fmt.Println("After mine bad block!") fmt.Println("After mine bad block!")
tu.printHeads() tu.printHeads()
@ -479,7 +479,7 @@ func TestSyncFork(t *testing.T) {
a := tu.mineOnBlock(a1, p1, []int{0}, true, false) a := tu.mineOnBlock(a1, p1, []int{0}, true, false)
a = tu.mineOnBlock(a, p1, []int{0}, true, false) a = tu.mineOnBlock(a, p1, []int{0}, true, false)
tu.g.ResyncBankerNonce(a1.TipSet()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest // chain B will now be heaviest
b := tu.mineOnBlock(base, p2, []int{1}, true, false) b := tu.mineOnBlock(base, p2, []int{1}, true, false)
b = tu.mineOnBlock(b, p2, []int{1}, true, false) b = tu.mineOnBlock(b, p2, []int{1}, true, false)

View File

@ -76,7 +76,7 @@ func SizeStr(bi BigInt) string {
} }
f, _ := r.Float64() f, _ := r.Float64()
return fmt.Sprintf("%.3g %s", f, byteSizeUnits[i]) return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i])
} }
var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"}

View File

@ -3,7 +3,12 @@ package types
import ( import (
"bytes" "bytes"
"math/big" "math/big"
"math/rand"
"strings"
"testing" "testing"
"time"
"github.com/docker/go-units"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -60,8 +65,10 @@ func TestSizeStr(t *testing.T) {
}{ }{
{0, "0 B"}, {0, "0 B"},
{1, "1 B"}, {1, "1 B"},
{1016, "1016 B"},
{1024, "1 KiB"}, {1024, "1 KiB"},
{2000, "1.95 KiB"}, {1000 * 1024, "1000 KiB"},
{2000, "1.953 KiB"},
{5 << 20, "5 MiB"}, {5 << 20, "5 MiB"},
{11 << 60, "11 EiB"}, {11 << 60, "11 EiB"},
} }
@ -71,6 +78,22 @@ func TestSizeStr(t *testing.T) {
} }
} }
func TestSizeStrUnitsSymmetry(t *testing.T) {
s := rand.NewSource(time.Now().UnixNano())
r := rand.New(s)
for i := 0; i < 1000000; i++ {
n := r.Uint64()
l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "")
r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "")
assert.NotContains(t, l, "e+")
assert.NotContains(t, r, "e+")
assert.Equal(t, l, r, "wrong formatting for %d", n)
}
}
func TestSizeStrBig(t *testing.T) { func TestSizeStrBig(t *testing.T) {
ZiB := big.NewInt(50000) ZiB := big.NewInt(50000)
ZiB = ZiB.Lsh(ZiB, 70) ZiB = ZiB.Lsh(ZiB, 70)
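The precision bump from %.3g to %.4g matters because Go's %g verb switches to scientific notation once the decimal exponent reaches the precision, which is exactly what the new symmetry test guards against with its assert.NotContains(..., "e+") checks. A quick illustration:

package main

import "fmt"

func main() {
	fmt.Printf("%.3g KiB\n", 1000.0)      // 1e+03 KiB  (old formatting, rejected by the test)
	fmt.Printf("%.4g KiB\n", 1000.0)      // 1000 KiB
	fmt.Printf("%.4g KiB\n", 2000.0/1024) // 1.953 KiB  (matches the updated test case)
}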

View File

@ -74,8 +74,8 @@ type BlockHeader struct {
validated bool // true if the signature has been validated validated bool // true if the signature has been validated
} }
func (b *BlockHeader) ToStorageBlock() (block.Block, error) { func (blk *BlockHeader) ToStorageBlock() (block.Block, error) {
data, err := b.Serialize() data, err := blk.Serialize()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -89,8 +89,8 @@ func (b *BlockHeader) ToStorageBlock() (block.Block, error) {
return block.NewBlockWithCid(data, c) return block.NewBlockWithCid(data, c)
} }
func (b *BlockHeader) Cid() cid.Cid { func (blk *BlockHeader) Cid() cid.Cid {
sb, err := b.ToStorageBlock() sb, err := blk.ToStorageBlock()
if err != nil { if err != nil {
panic(err) // Not sure i'm entirely comfortable with this one, needs to be checked panic(err) // Not sure i'm entirely comfortable with this one, needs to be checked
} }

View File

@ -4,13 +4,15 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/stretchr/testify/require"
"reflect" "reflect"
"testing" "testing"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/crypto"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
) )
func testBlockHeader(t testing.TB) *BlockHeader { func testBlockHeader(t testing.TB) *BlockHeader {
@ -78,7 +80,7 @@ func TestInteropBH(t *testing.T) {
} }
posts := []abi.PoStProof{ posts := []abi.PoStProof{
{abi.RegisteredProof_StackedDRG2KiBWinningPoSt, []byte{0x07}}, {abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, []byte{0x07}},
} }
bh := &BlockHeader{ bh := &BlockHeader{
@ -115,10 +117,8 @@ func TestInteropBH(t *testing.T) {
} }
// acquired from go-filecoin // acquired from go-filecoin
gfc := "8f5501d04cb15021bf6bd003073d79e2238d4e61f1ad22814301020381420a0b818205410c818209410781d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc430003e802d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc410001f603" gfc := "8f5501d04cb15021bf6bd003073d79e2238d4e61f1ad22814301020381420a0b818205410c818200410781d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc430003e802d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc410001f603"
if gfc != hex.EncodeToString(bhsb) { require.Equal(t, gfc, hex.EncodeToString(bhsb))
t.Fatal("not equal!")
}
} }
func BenchmarkBlockHeaderMarshal(b *testing.B) { func BenchmarkBlockHeaderMarshal(b *testing.B) {

View File

@ -1,12 +1,103 @@
package types package types
import "time" import (
"encoding/json"
"fmt"
"runtime"
"strings"
"time"
)
type ExecutionResult struct { type ExecutionTrace struct {
Msg *Message Msg *Message
MsgRct *MessageReceipt MsgRct *MessageReceipt
Error string Error string
Duration time.Duration Duration time.Duration
GasCharges []*GasTrace
Subcalls []*ExecutionResult Subcalls []ExecutionTrace
}
type GasTrace struct {
Name string
Location []Loc `json:"loc"`
TotalGas int64 `json:"tg"`
ComputeGas int64 `json:"cg"`
StorageGas int64 `json:"sg"`
TotalVirtualGas int64 `json:"vtg"`
VirtualComputeGas int64 `json:"vcg"`
VirtualStorageGas int64 `json:"vsg"`
TimeTaken time.Duration `json:"tt"`
Extra interface{} `json:"ex,omitempty"`
Callers []uintptr `json:"-"`
}
type Loc struct {
File string
Line int
Function string
}
func (l Loc) Show() bool {
ignorePrefix := []string{
"reflect.",
"github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform",
"github.com/filecoin-project/go-amt-ipld/",
}
for _, pre := range ignorePrefix {
if strings.HasPrefix(l.Function, pre) {
return false
}
}
return true
}
func (l Loc) String() string {
file := strings.Split(l.File, "/")
fn := strings.Split(l.Function, "/")
var fnpkg string
if len(fn) > 2 {
fnpkg = strings.Join(fn[len(fn)-2:], "/")
} else {
fnpkg = l.Function
}
return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line)
}
func (l Loc) Important() bool {
if strings.HasPrefix(l.Function, "github.com/filecoin-project/specs-actors/actors/builtin") {
return true
}
return false
}
func (gt *GasTrace) MarshalJSON() ([]byte, error) {
type GasTraceCopy GasTrace
if len(gt.Location) == 0 {
if len(gt.Callers) != 0 {
frames := runtime.CallersFrames(gt.Callers)
for {
frame, more := frames.Next()
if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*VM).ApplyMessage" {
break
}
l := Loc{
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
gt.Location = append(gt.Location, l)
if !more {
break
}
}
}
}
cpy := (*GasTraceCopy)(gt)
return json.Marshal(cpy)
} }
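GasTrace defers the expensive part of stack capture: at charge time only raw program counters are recorded (the Callers field, excluded from JSON), and file/line locations are resolved lazily in MarshalJSON, stopping once ApplyMessage is reached. A self-contained sketch of that capture/resolve split using only the standard library (the cut-off function is omitted here):

package main

import (
	"fmt"
	"runtime"
)

type loc struct {
	File     string
	Line     int
	Function string
}

// captureCallers is cheap: it records program counters only.
func captureCallers(skip int) []uintptr {
	pcs := make([]uintptr, 16)
	n := runtime.Callers(skip, pcs)
	return pcs[:n]
}

// resolve is the expensive symbolization step, done only when needed.
func resolve(callers []uintptr) []loc {
	var out []loc
	frames := runtime.CallersFrames(callers)
	for {
		frame, more := frames.Next()
		out = append(out, loc{File: frame.File, Line: frame.Line, Function: frame.Function})
		if !more {
			break
		}
	}
	return out
}

func main() {
	callers := captureCallers(2)
	for _, l := range resolve(callers) {
		fmt.Printf("%s@%s:%d\n", l.Function, l.File, l.Line)
	}
}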

View File

@ -11,7 +11,7 @@ import (
type FIL BigInt type FIL BigInt
func (f FIL) String() string { func (f FIL) String() string {
r := new(big.Rat).SetFrac(f.Int, big.NewInt(build.FilecoinPrecision)) r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision)))
if r.Sign() == 0 { if r.Sign() == 0 {
return "0" return "0"
} }
@ -33,7 +33,7 @@ func ParseFIL(s string) (FIL, error) {
return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s)
} }
r = r.Mul(r, big.NewRat(build.FilecoinPrecision, 1)) r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1))
if !r.IsInt() { if !r.IsInt() {
return FIL{}, fmt.Errorf("invalid FIL value: %q", s) return FIL{}, fmt.Errorf("invalid FIL value: %q", s)
} }
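ParseFIL above scales the parsed decimal by the Filecoin precision constant (now cast to int64) and rejects anything that does not land on an integer number of base units. A small illustration, assuming the precision is 10^18 attoFIL per FIL:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	precision := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	// "1.5" parses as the rational 3/2; scaling by the precision must yield an integer.
	r, _ := new(big.Rat).SetString("1.5")
	r = r.Mul(r, new(big.Rat).SetInt(precision))
	fmt.Println(r.IsInt(), r.Num()) // true 1500000000000000000
}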

View File

@ -21,6 +21,7 @@ type ChainMsg interface {
Cid() cid.Cid Cid() cid.Cid
VMMessage() *Message VMMessage() *Message
ToStorageBlock() (block.Block, error) ToStorageBlock() (block.Block, error)
// FIXME: This is the *message* length, this name is misleading.
ChainLength() int ChainLength() int
} }
@ -41,20 +42,16 @@ type Message struct {
Params []byte Params []byte
} }
func (t *Message) BlockMiner() address.Address { func (m *Message) Caller() address.Address {
panic("implement me") return m.From
} }
func (t *Message) Caller() address.Address { func (m *Message) Receiver() address.Address {
return t.From return m.To
} }
func (t *Message) Receiver() address.Address { func (m *Message) ValueReceived() abi.TokenAmount {
return t.To return m.Value
}
func (t *Message) ValueReceived() abi.TokenAmount {
return t.Value
} }
func DecodeMessage(b []byte) (*Message, error) { func DecodeMessage(b []byte) (*Message, error) {

View File

@ -49,6 +49,11 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types
panic(err) panic(err)
} }
pstateRoot := c
if parents != nil {
pstateRoot = parents.Blocks()[0].ParentStateRoot
}
var pcids []cid.Cid var pcids []cid.Cid
var height abi.ChainEpoch var height abi.ChainEpoch
weight := types.NewInt(weightInc) weight := types.NewInt(weightInc)
@ -72,7 +77,7 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types
ParentWeight: weight, ParentWeight: weight,
Messages: c, Messages: c,
Height: height, Height: height,
ParentStateRoot: c, ParentStateRoot: pstateRoot,
BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")},
} }
} }

View File

@ -9,12 +9,12 @@ import (
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
) )
func (m *SignedMessage) ToStorageBlock() (block.Block, error) { func (sm *SignedMessage) ToStorageBlock() (block.Block, error) {
if m.Signature.Type == crypto.SigTypeBLS { if sm.Signature.Type == crypto.SigTypeBLS {
return m.Message.ToStorageBlock() return sm.Message.ToStorageBlock()
} }
data, err := m.Serialize() data, err := sm.Serialize()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -28,12 +28,12 @@ func (m *SignedMessage) ToStorageBlock() (block.Block, error) {
return block.NewBlockWithCid(data, c) return block.NewBlockWithCid(data, c)
} }
func (m *SignedMessage) Cid() cid.Cid { func (sm *SignedMessage) Cid() cid.Cid {
if m.Signature.Type == crypto.SigTypeBLS { if sm.Signature.Type == crypto.SigTypeBLS {
return m.Message.Cid() return sm.Message.Cid()
} }
sb, err := m.ToStorageBlock() sb, err := sm.ToStorageBlock()
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -23,8 +23,6 @@ type TipSet struct {
height abi.ChainEpoch height abi.ChainEpoch
} }
// why didnt i just export the fields? Because the struct has methods with the
// same names already
type ExpTipSet struct { type ExpTipSet struct {
Cids []cid.Cid Cids []cid.Cid
Blocks []*BlockHeader Blocks []*BlockHeader
@ -32,6 +30,8 @@ type ExpTipSet struct {
} }
func (ts *TipSet) MarshalJSON() ([]byte, error) { func (ts *TipSet) MarshalJSON() ([]byte, error) {
// why didnt i just export the fields? Because the struct has methods with the
// same names already
return json.Marshal(ExpTipSet{ return json.Marshal(ExpTipSet{
Cids: ts.cids, Cids: ts.cids,
Blocks: ts.blks, Blocks: ts.blks,

View File

@ -61,9 +61,9 @@ func TestTipSetKey(t *testing.T) {
t.Run("JSON", func(t *testing.T) { t.Run("JSON", func(t *testing.T) {
k0 := NewTipSetKey() k0 := NewTipSetKey()
verifyJson(t, "[]", k0) verifyJSON(t, "[]", k0)
k3 := NewTipSetKey(c1, c2, c3) k3 := NewTipSetKey(c1, c2, c3)
verifyJson(t, `[`+ verifyJSON(t, `[`+
`{"/":"bafy2bzacecesrkxghscnq7vatble2hqdvwat6ed23vdu4vvo3uuggsoaya7ki"},`+ `{"/":"bafy2bzacecesrkxghscnq7vatble2hqdvwat6ed23vdu4vvo3uuggsoaya7ki"},`+
`{"/":"bafy2bzacebxfyh2fzoxrt6kcgc5dkaodpcstgwxxdizrww225vrhsizsfcg4g"},`+ `{"/":"bafy2bzacebxfyh2fzoxrt6kcgc5dkaodpcstgwxxdizrww225vrhsizsfcg4g"},`+
`{"/":"bafy2bzacedwviarjtjraqakob5pslltmuo5n3xev3nt5zylezofkbbv5jclyu"}`+ `{"/":"bafy2bzacedwviarjtjraqakob5pslltmuo5n3xev3nt5zylezofkbbv5jclyu"}`+
@ -71,7 +71,7 @@ func TestTipSetKey(t *testing.T) {
}) })
} }
func verifyJson(t *testing.T, expected string, k TipSetKey) { func verifyJSON(t *testing.T, expected string, k TipSetKey) {
bytes, err := json.Marshal(k) bytes, err := json.Marshal(k)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expected, string(bytes)) assert.Equal(t, expected, string(bytes))

View File

@ -7,6 +7,7 @@ import (
"os" "os"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -173,14 +174,18 @@ func WriteJsonToFile(fname string, obj interface{}) error {
if err != nil { if err != nil {
return err return err
} }
defer fi.Close() defer fi.Close() //nolint:errcheck
out, err := json.MarshalIndent(obj, "", " ") out, err := json.MarshalIndent(obj, "", " ")
if err != nil { if err != nil {
return err return err
} }
fi.Write(out) _, err = fi.Write(out)
if err != nil {
return xerrors.Errorf("writing json: %w", err)
}
return nil return nil
} }

View File

@ -3,6 +3,7 @@ package vm
import ( import (
"fmt" "fmt"
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
@ -11,34 +12,74 @@ import (
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
) )
const (
GasStorageMulti = 1
GasComputeMulti = 1
)
type GasCharge struct {
Name string
Extra interface{}
ComputeGas int64
StorageGas int64
VirtualCompute int64
VirtualStorage int64
}
func (g GasCharge) Total() int64 {
return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti
}
func (g GasCharge) WithVirtual(compute, storage int64) GasCharge {
out := g
out.VirtualCompute = compute
out.VirtualStorage = storage
return out
}
func (g GasCharge) WithExtra(extra interface{}) GasCharge {
out := g
out.Extra = extra
return out
}
func newGasCharge(name string, computeGas int64, storageGas int64) GasCharge {
return GasCharge{
Name: name,
ComputeGas: computeGas,
StorageGas: storageGas,
}
}
// Pricelist provides prices for operations in the VM. // Pricelist provides prices for operations in the VM.
// //
// Note: this interface should be APPEND ONLY since last chain checkpoint // Note: this interface should be APPEND ONLY since last chain checkpoint
type Pricelist interface { type Pricelist interface {
// OnChainMessage returns the gas used for storing a message of a given size in the chain. // OnChainMessage returns the gas used for storing a message of a given size in the chain.
OnChainMessage(msgSize int) int64 OnChainMessage(msgSize int) GasCharge
// OnChainReturnValue returns the gas used for storing the response of a message in the chain. // OnChainReturnValue returns the gas used for storing the response of a message in the chain.
OnChainReturnValue(dataSize int) int64 OnChainReturnValue(dataSize int) GasCharge
// OnMethodInvocation returns the gas used when invoking a method. // OnMethodInvocation returns the gas used when invoking a method.
OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) int64 OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge
// OnIpldGet returns the gas used for storing an object // OnIpldGet returns the gas used for storing an object
OnIpldGet(dataSize int) int64 OnIpldGet(dataSize int) GasCharge
// OnIpldPut returns the gas used for storing an object // OnIpldPut returns the gas used for storing an object
OnIpldPut(dataSize int) int64 OnIpldPut(dataSize int) GasCharge
// OnCreateActor returns the gas used for creating an actor // OnCreateActor returns the gas used for creating an actor
OnCreateActor() int64 OnCreateActor() GasCharge
// OnDeleteActor returns the gas used for deleting an actor // OnDeleteActor returns the gas used for deleting an actor
OnDeleteActor() int64 OnDeleteActor() GasCharge
OnVerifySignature(sigType crypto.SigType, planTextSize int) (int64, error) OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error)
OnHashing(dataSize int) int64 OnHashing(dataSize int) GasCharge
OnComputeUnsealedSectorCid(proofType abi.RegisteredProof, pieces []abi.PieceInfo) int64 OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge
OnVerifySeal(info abi.SealVerifyInfo) int64 OnVerifySeal(info abi.SealVerifyInfo) GasCharge
OnVerifyPost(info abi.WindowPoStVerifyInfo) int64 OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge
OnVerifyConsensusFault() int64 OnVerifyConsensusFault() GasCharge
} }
var prices = map[abi.ChainEpoch]Pricelist{ var prices = map[abi.ChainEpoch]Pricelist{
@ -92,7 +133,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
type pricedSyscalls struct { type pricedSyscalls struct {
under vmr.Syscalls under vmr.Syscalls
pl Pricelist pl Pricelist
chargeGas func(int64) chargeGas func(GasCharge)
} }
// Verifies that a signature is valid for an address and plaintext. // Verifies that a signature is valid for an address and plaintext.
@ -102,30 +143,40 @@ func (ps pricedSyscalls) VerifySignature(signature crypto.Signature, signer addr
return err return err
} }
ps.chargeGas(c) ps.chargeGas(c)
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifySignature(signature, signer, plaintext) return ps.under.VerifySignature(signature, signer, plaintext)
} }
// Hashes input data using blake2b with 256 bit output. // Hashes input data using blake2b with 256 bit output.
func (ps pricedSyscalls) HashBlake2b(data []byte) [32]byte { func (ps pricedSyscalls) HashBlake2b(data []byte) [32]byte {
ps.chargeGas(ps.pl.OnHashing(len(data))) ps.chargeGas(ps.pl.OnHashing(len(data)))
defer ps.chargeGas(gasOnActorExec)
return ps.under.HashBlake2b(data) return ps.under.HashBlake2b(data)
} }
// Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. // Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes.
func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
ps.chargeGas(ps.pl.OnComputeUnsealedSectorCid(reg, pieces)) ps.chargeGas(ps.pl.OnComputeUnsealedSectorCid(reg, pieces))
defer ps.chargeGas(gasOnActorExec)
return ps.under.ComputeUnsealedSectorCID(reg, pieces) return ps.under.ComputeUnsealedSectorCID(reg, pieces)
} }
// Verifies a sector seal proof. // Verifies a sector seal proof.
func (ps pricedSyscalls) VerifySeal(vi abi.SealVerifyInfo) error { func (ps pricedSyscalls) VerifySeal(vi abi.SealVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifySeal(vi)) ps.chargeGas(ps.pl.OnVerifySeal(vi))
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifySeal(vi) return ps.under.VerifySeal(vi)
} }
// Verifies a proof of spacetime. // Verifies a proof of spacetime.
func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error { func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifyPost(vi)) ps.chargeGas(ps.pl.OnVerifyPost(vi))
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyPoSt(vi) return ps.under.VerifyPoSt(vi)
} }
@ -141,5 +192,21 @@ func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error {
// Returns nil and an error if the headers don't prove a fault. // Returns nil and an error if the headers don't prove a fault.
func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*runtime.ConsensusFault, error) { func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*runtime.ConsensusFault, error) {
ps.chargeGas(ps.pl.OnVerifyConsensusFault()) ps.chargeGas(ps.pl.OnVerifyConsensusFault())
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyConsensusFault(h1, h2, extra) return ps.under.VerifyConsensusFault(h1, h2, extra)
} }
func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
var gasChargeSum GasCharge
gasChargeSum.Name = "BatchVerifySeals"
count := int64(0)
for _, svis := range inp {
count += int64(len(svis))
}
gasChargeSum = gasChargeSum.WithExtra(count).WithVirtual(129778623*count+716683250, 0)
ps.chargeGas(gasChargeSum) // TODO: this is only called by the cron actor. Should we even charge gas?
defer ps.chargeGas(gasOnActorExec)
return ps.under.BatchVerifySeals(inp)
}
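The virtual charge here is purely a calibration signal: for a batch containing, say, two SealVerifyInfos across all miners, the recorded virtual compute gas is 129778623*2 + 716683250 = 976,240,496, while the charge's real compute and storage gas stay zero, so Total() is unaffected and cron-driven batch verification still deducts nothing.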

View File

@ -2,7 +2,9 @@ package vm
import ( import (
"fmt" "fmt"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
) )
@ -84,80 +86,112 @@ type pricelistV0 struct {
var _ Pricelist = (*pricelistV0)(nil) var _ Pricelist = (*pricelistV0)(nil)
// OnChainMessage returns the gas used for storing a message of a given size in the chain. // OnChainMessage returns the gas used for storing a message of a given size in the chain.
func (pl *pricelistV0) OnChainMessage(msgSize int) int64 { func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge {
return pl.onChainMessageBase + pl.onChainMessagePerByte*int64(msgSize) return newGasCharge("OnChainMessage", 0, pl.onChainMessageBase+pl.onChainMessagePerByte*int64(msgSize)).WithVirtual(77302, 0)
} }
// OnChainReturnValue returns the gas used for storing the response of a message in the chain. // OnChainReturnValue returns the gas used for storing the response of a message in the chain.
func (pl *pricelistV0) OnChainReturnValue(dataSize int) int64 { func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge {
return int64(dataSize) * pl.onChainReturnValuePerByte return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte).WithVirtual(107294, 0)
} }
// OnMethodInvocation returns the gas used when invoking a method. // OnMethodInvocation returns the gas used when invoking a method.
func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) int64 { func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge {
ret := pl.sendBase ret := pl.sendBase
extra := ""
virtGas := int64(1072944)
if value != abi.NewTokenAmount(0) { if value != abi.NewTokenAmount(0) {
// TODO: fix this, it is comparing pointers instead of values
// see vv
ret += pl.sendTransferFunds ret += pl.sendTransferFunds
} }
if big.Cmp(value, abi.NewTokenAmount(0)) != 0 {
virtGas += 497495
if methodNum == builtin.MethodSend {
// transfer only
virtGas += 973940
}
extra += "t"
}
if methodNum != builtin.MethodSend { if methodNum != builtin.MethodSend {
ret += pl.sendInvokeMethod ret += pl.sendInvokeMethod
extra += "i"
// running actors is cheaper because we hand over to actors
virtGas += -295779
} }
return ret return newGasCharge("OnMethodInvocation", ret, 0).WithVirtual(virtGas, 0).WithExtra(extra)
} }
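On the TODO above: value != abi.NewTokenAmount(0) compares the wrapped big.Int pointers rather than the numeric values, which is why a second check with big.Cmp is added right after it. A value-based version of the first branch would read as the sketch below (this is not the committed fix, just an illustration using the big package already imported in this file):

if big.Cmp(value, abi.NewTokenAmount(0)) != 0 { // compare numeric values, not pointers
	ret += pl.sendTransferFunds
}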
// OnIpldGet returns the gas used for storing an object // OnIpldGet returns the gas used for storing an object
func (pl *pricelistV0) OnIpldGet(dataSize int) int64 { func (pl *pricelistV0) OnIpldGet(dataSize int) GasCharge {
return pl.ipldGetBase + int64(dataSize)*pl.ipldGetPerByte return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0).
WithExtra(dataSize).WithVirtual(433685, 0)
} }
// OnIpldPut returns the gas used for storing an object // OnIpldPut returns the gas used for storing an object
func (pl *pricelistV0) OnIpldPut(dataSize int) int64 { func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge {
return pl.ipldPutBase + int64(dataSize)*pl.ipldPutPerByte return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte).
WithExtra(dataSize).WithVirtual(88970, 0)
} }
// OnCreateActor returns the gas used for creating an actor // OnCreateActor returns the gas used for creating an actor
func (pl *pricelistV0) OnCreateActor() int64 { func (pl *pricelistV0) OnCreateActor() GasCharge {
return pl.createActorBase + pl.createActorExtra return newGasCharge("OnCreateActor", pl.createActorBase, pl.createActorExtra).WithVirtual(65636, 0)
} }
// OnDeleteActor returns the gas used for deleting an actor // OnDeleteActor returns the gas used for deleting an actor
func (pl *pricelistV0) OnDeleteActor() int64 { func (pl *pricelistV0) OnDeleteActor() GasCharge {
return pl.deleteActor return newGasCharge("OnDeleteActor", 0, pl.deleteActor)
} }
// OnVerifySignature // OnVerifySignature
func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (int64, error) {
func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) {
costFn, ok := pl.verifySignature[sigType] costFn, ok := pl.verifySignature[sigType]
if !ok { if !ok {
return 0, fmt.Errorf("cost function for signature type %d not supported", sigType) return GasCharge{}, fmt.Errorf("cost function for signature type %d not supported", sigType)
} }
return costFn(int64(planTextSize)), nil sigName, _ := sigType.Name()
virtGas := int64(0)
switch sigType {
case crypto.SigTypeBLS:
virtGas = 220138570
case crypto.SigTypeSecp256k1:
virtGas = 7053730
}
return newGasCharge("OnVerifySignature", costFn(int64(planTextSize)), 0).
WithExtra(map[string]interface{}{
"type": sigName,
"size": planTextSize,
}).WithVirtual(virtGas, 0), nil
} }
// OnHashing // OnHashing
func (pl *pricelistV0) OnHashing(dataSize int) int64 { func (pl *pricelistV0) OnHashing(dataSize int) GasCharge {
return pl.hashingBase + int64(dataSize)*pl.hashingPerByte return newGasCharge("OnHashing", pl.hashingBase+int64(dataSize)*pl.hashingPerByte, 0).WithExtra(dataSize).WithVirtual(77300, 0)
} }
// OnComputeUnsealedSectorCid // OnComputeUnsealedSectorCid
func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredProof, pieces []abi.PieceInfo) int64 { func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus // TODO: this needs more cost tuning, check with @lotus
return pl.computeUnsealedSectorCidBase return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0).WithVirtual(382370, 0)
} }
// OnVerifySeal // OnVerifySeal
func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) int64 { func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus // TODO: this needs more cost tuning, check with @lotus
return pl.verifySealBase return newGasCharge("OnVerifySeal", pl.verifySealBase, 0).WithVirtual(199954003, 0)
} }
// OnVerifyPost // OnVerifyPost
func (pl *pricelistV0) OnVerifyPost(info abi.WindowPoStVerifyInfo) int64 { func (pl *pricelistV0) OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus // TODO: this needs more cost tuning, check with @lotus
return pl.verifyPostBase return newGasCharge("OnVerifyPost", pl.verifyPostBase, 0).WithVirtual(2629471704, 0).WithExtra(len(info.ChallengedSectors))
} }
// OnVerifyConsensusFault // OnVerifyConsensusFault
func (pl *pricelistV0) OnVerifyConsensusFault() int64 { func (pl *pricelistV0) OnVerifyConsensusFault() GasCharge {
return pl.verifyConsensusFault return newGasCharge("OnVerifyConsensusFault", pl.verifyConsensusFault, 0).WithVirtual(551935, 0)
} }
View File
@ -32,7 +32,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/aerrors" "github.com/filecoin-project/lotus/chain/actors/aerrors"
) )
type invoker struct { type Invoker struct {
builtInCode map[cid.Cid]nativeCode builtInCode map[cid.Cid]nativeCode
builtInState map[cid.Cid]reflect.Type builtInState map[cid.Cid]reflect.Type
} }
@ -40,8 +40,8 @@ type invoker struct {
type invokeFunc func(rt runtime.Runtime, params []byte) ([]byte, aerrors.ActorError) type invokeFunc func(rt runtime.Runtime, params []byte) ([]byte, aerrors.ActorError)
type nativeCode []invokeFunc type nativeCode []invokeFunc
func NewInvoker() *invoker { func NewInvoker() *Invoker {
inv := &invoker{ inv := &Invoker{
builtInCode: make(map[cid.Cid]nativeCode), builtInCode: make(map[cid.Cid]nativeCode),
builtInState: make(map[cid.Cid]reflect.Type), builtInState: make(map[cid.Cid]reflect.Type),
} }
@ -62,7 +62,7 @@ func NewInvoker() *invoker {
return inv return inv
} }
func (inv *invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { func (inv *Invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
code, ok := inv.builtInCode[codeCid] code, ok := inv.builtInCode[codeCid]
if !ok { if !ok {
@ -76,7 +76,7 @@ func (inv *invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.Metho
} }
func (inv *invoker) Register(c cid.Cid, instance Invokee, state interface{}) { func (inv *Invoker) Register(c cid.Cid, instance Invokee, state interface{}) {
code, err := inv.transform(instance) code, err := inv.transform(instance)
if err != nil { if err != nil {
panic(xerrors.Errorf("%s: %w", string(c.Hash()), err)) panic(xerrors.Errorf("%s: %w", string(c.Hash()), err))
@ -89,9 +89,7 @@ type Invokee interface {
Exports() []interface{} Exports() []interface{}
} }
var tAError = reflect.TypeOf((*aerrors.ActorError)(nil)).Elem() func (*Invoker) transform(instance Invokee) (nativeCode, error) {
func (*invoker) transform(instance Invokee) (nativeCode, error) {
itype := reflect.TypeOf(instance) itype := reflect.TypeOf(instance)
exports := instance.Exports() exports := instance.Exports()
for i, m := range exports { for i, m := range exports {
View File
@ -76,7 +76,7 @@ func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams)
} }
func TestInvokerBasic(t *testing.T) { func TestInvokerBasic(t *testing.T) {
inv := invoker{} inv := Invoker{}
code, err := inv.transform(basicContract{}) code, err := inv.transform(basicContract{})
assert.NoError(t, err) assert.NoError(t, err)
View File
@ -2,6 +2,7 @@ package vm
import ( import (
"context" "context"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
@ -27,7 +28,7 @@ func init() {
var EmptyObjectCid cid.Cid var EmptyObjectCid cid.Cid
// Creates account actors from only BLS/SECP256K1 addresses. // TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, aerrors.ActorError) { func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, aerrors.ActorError) {
addrID, err := rt.state.RegisterNewAddress(addr) addrID, err := rt.state.RegisterNewAddress(addr)
if err != nil { if err != nil {
View File
@ -5,6 +5,7 @@ import (
"context" "context"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
gruntime "runtime"
"time" "time"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -51,10 +52,12 @@ type Runtime struct {
origin address.Address origin address.Address
originNonce uint64 originNonce uint64
internalExecutions []*types.ExecutionResult executionTrace types.ExecutionTrace
numActorsCreated uint64 numActorsCreated uint64
allowInternal bool allowInternal bool
callerValidated bool callerValidated bool
lastGasChargeTime time.Time
lastGasCharge *types.GasTrace
} }
func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount {
@ -107,8 +110,8 @@ type notFoundErr interface {
IsNotFound() bool IsNotFound() bool
} }
func (rs *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool { func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
if err := rs.cst.Get(context.TODO(), c, o); err != nil { if err := rt.cst.Get(context.TODO(), c, o); err != nil {
var nfe notFoundErr var nfe notFoundErr
if xerrors.As(err, &nfe) && nfe.IsNotFound() { if xerrors.As(err, &nfe) && nfe.IsNotFound() {
if xerrors.As(err, new(cbor.SerializationError)) { if xerrors.As(err, new(cbor.SerializationError)) {
@ -122,8 +125,8 @@ func (rs *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool {
return true return true
} }
func (rs *Runtime) Put(x vmr.CBORMarshaler) cid.Cid { func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
c, err := rs.cst.Put(context.TODO(), x) c, err := rt.cst.Put(context.TODO(), x)
if err != nil { if err != nil {
if xerrors.As(err, new(cbor.SerializationError)) { if xerrors.As(err, new(cbor.SerializationError)) {
panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err)) panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err))
@ -135,7 +138,7 @@ func (rs *Runtime) Put(x vmr.CBORMarshaler) cid.Cid {
var _ vmr.Runtime = (*Runtime)(nil) var _ vmr.Runtime = (*Runtime)(nil)
func (rs *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
if ar, ok := r.(aerrors.ActorError); ok { if ar, ok := r.(aerrors.ActorError); ok {
@ -150,8 +153,8 @@ func (rs *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
ret := f() ret := f()
if !rs.callerValidated { if !rt.callerValidated {
rs.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution") rt.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution")
} }
switch ret := ret.(type) { switch ret := ret.(type) {
@ -172,25 +175,25 @@ func (rs *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
} }
} }
func (rs *Runtime) Message() vmr.Message { func (rt *Runtime) Message() vmr.Message {
return rs.vmsg return rt.vmsg
} }
func (rs *Runtime) ValidateImmediateCallerAcceptAny() { func (rt *Runtime) ValidateImmediateCallerAcceptAny() {
rs.abortIfAlreadyValidated() rt.abortIfAlreadyValidated()
return return
} }
func (rs *Runtime) CurrentBalance() abi.TokenAmount { func (rt *Runtime) CurrentBalance() abi.TokenAmount {
b, err := rs.GetBalance(rs.Message().Receiver()) b, err := rt.GetBalance(rt.Message().Receiver())
if err != nil { if err != nil {
rs.Abortf(exitcode.ExitCode(err.RetCode()), "get current balance: %v", err) rt.Abortf(err.RetCode(), "get current balance: %v", err)
} }
return b return b
} }
func (rs *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) { func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) {
act, err := rs.state.GetActor(addr) act, err := rt.state.GetActor(addr)
if err != nil { if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, false return cid.Undef, false
@ -210,8 +213,8 @@ func (rt *Runtime) GetRandomness(personalization crypto.DomainSeparationTag, ran
return res return res
} }
func (rs *Runtime) Store() vmr.Store { func (rt *Runtime) Store() vmr.Store {
return rs return rt
} }
func (rt *Runtime) NewActorAddress() address.Address { func (rt *Runtime) NewActorAddress() address.Address {
@ -236,12 +239,12 @@ func (rt *Runtime) NewActorAddress() address.Address {
return addr return addr
} }
func (rt *Runtime) CreateActor(codeId cid.Cid, address address.Address) { func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) {
if !builtin.IsBuiltinActor(codeId) { if !builtin.IsBuiltinActor(codeID) {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.") rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.")
} }
if builtin.IsSingletonActor(codeId) { if builtin.IsSingletonActor(codeID) {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.") rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.")
} }
@ -250,10 +253,10 @@ func (rt *Runtime) CreateActor(codeId cid.Cid, address address.Address) {
rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists")
} }
rt.ChargeGas(rt.Pricelist().OnCreateActor()) rt.chargeGas(rt.Pricelist().OnCreateActor())
err = rt.state.SetActor(address, &types.Actor{ err = rt.state.SetActor(address, &types.Actor{
Code: codeId, Code: codeID,
Head: EmptyObjectCid, Head: EmptyObjectCid,
Nonce: 0, Nonce: 0,
Balance: big.Zero(), Balance: big.Zero(),
@ -261,10 +264,11 @@ func (rt *Runtime) CreateActor(codeId cid.Cid, address address.Address) {
if err != nil { if err != nil {
panic(aerrors.Fatalf("creating actor entry: %v", err)) panic(aerrors.Fatalf("creating actor entry: %v", err))
} }
_ = rt.chargeGasSafe(gasOnActorExec)
} }
func (rt *Runtime) DeleteActor(addr address.Address) { func (rt *Runtime) DeleteActor(addr address.Address) {
rt.ChargeGas(rt.Pricelist().OnDeleteActor()) rt.chargeGas(rt.Pricelist().OnDeleteActor())
act, err := rt.state.GetActor(rt.Message().Receiver()) act, err := rt.state.GetActor(rt.Message().Receiver())
if err != nil { if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
@ -281,14 +285,14 @@ func (rt *Runtime) DeleteActor(addr address.Address) {
if err := rt.state.DeleteActor(rt.Message().Receiver()); err != nil { if err := rt.state.DeleteActor(rt.Message().Receiver()); err != nil {
panic(aerrors.Fatalf("failed to delete actor: %s", err)) panic(aerrors.Fatalf("failed to delete actor: %s", err))
} }
_ = rt.chargeGasSafe(gasOnActorExec)
} }
func (rs *Runtime) Syscalls() vmr.Syscalls { func (rt *Runtime) Syscalls() vmr.Syscalls {
// TODO: Make sure this is wrapped in something that charges gas for each of the calls return rt.sys
return rs.sys
} }
func (rs *Runtime) StartSpan(name string) vmr.TraceSpan { func (rt *Runtime) StartSpan(name string) vmr.TraceSpan {
panic("implement me") panic("implement me")
} }
@ -308,12 +312,12 @@ func (rt *Runtime) Context() context.Context {
return rt.ctx return rt.ctx
} }
func (rs *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) { func (rt *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) {
log.Warnf("Abortf: ", fmt.Sprintf(msg, args...)) log.Warnf("Abortf: " + fmt.Sprintf(msg, args...))
panic(aerrors.NewfSkip(2, code, msg, args...)) panic(aerrors.NewfSkip(2, code, msg, args...))
} }
func (rs *Runtime) AbortStateMsg(msg string) { func (rt *Runtime) AbortStateMsg(msg string) {
panic(aerrors.NewfSkip(3, 101, msg)) panic(aerrors.NewfSkip(3, 101, msg))
} }
@ -331,8 +335,8 @@ func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) {
rt.Abortf(exitcode.SysErrForbidden, "caller cid type %q was not one of %v", callerCid, ts) rt.Abortf(exitcode.SysErrForbidden, "caller cid type %q was not one of %v", callerCid, ts)
} }
func (rs *Runtime) CurrEpoch() abi.ChainEpoch { func (rt *Runtime) CurrEpoch() abi.ChainEpoch {
return rs.height return rt.height
} }
type dumbWrapperType struct { type dumbWrapperType struct {
@ -343,31 +347,33 @@ func (dwt *dumbWrapperType) Into(um vmr.CBORUnmarshaler) error {
return um.UnmarshalCBOR(bytes.NewReader(dwt.val)) return um.UnmarshalCBOR(bytes.NewReader(dwt.val))
} }
func (rs *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMarshaler, value abi.TokenAmount) (vmr.SendReturn, exitcode.ExitCode) { func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMarshaler, value abi.TokenAmount) (vmr.SendReturn, exitcode.ExitCode) {
if !rs.allowInternal { if !rt.allowInternal {
rs.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed") rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed")
} }
var params []byte var params []byte
if m != nil { if m != nil {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
if err := m.MarshalCBOR(buf); err != nil { if err := m.MarshalCBOR(buf); err != nil {
rs.Abortf(exitcode.SysErrInvalidParameters, "failed to marshal input parameters: %s", err) rt.Abortf(exitcode.SysErrInvalidParameters, "failed to marshal input parameters: %s", err)
} }
params = buf.Bytes() params = buf.Bytes()
} }
ret, err := rs.internalSend(rs.Message().Receiver(), to, method, types.BigInt(value), params) ret, err := rt.internalSend(rt.Message().Receiver(), to, method, value, params)
if err != nil { if err != nil {
if err.IsFatal() { if err.IsFatal() {
panic(err) panic(err)
} }
log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err) log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err)
return nil, exitcode.ExitCode(err.RetCode()) return nil, err.RetCode()
} }
_ = rt.chargeGasSafe(gasOnActorExec)
return &dumbWrapperType{ret}, 0 return &dumbWrapperType{ret}, 0
} }
func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) { func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) {
start := time.Now() start := time.Now()
ctx, span := trace.StartSpan(rt.ctx, "vmc.Send") ctx, span := trace.StartSpan(rt.ctx, "vmc.Send")
defer span.End() defer span.End()
@ -394,77 +400,62 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
} }
defer st.ClearSnapshot() defer st.ClearSnapshot()
ret, errSend, subrt := rt.vm.send(ctx, msg, rt, 0) ret, errSend, subrt := rt.vm.send(ctx, msg, rt, nil, start)
if errSend != nil { if errSend != nil {
if errRevert := st.Revert(); errRevert != nil { if errRevert := st.Revert(); errRevert != nil {
return nil, aerrors.Escalate(errRevert, "failed to revert state tree after failed subcall") return nil, aerrors.Escalate(errRevert, "failed to revert state tree after failed subcall")
} }
} }
mr := types.MessageReceipt{
ExitCode: exitcode.ExitCode(aerrors.RetCode(errSend)),
Return: ret,
GasUsed: 0,
}
er := types.ExecutionResult{
Msg: msg,
MsgRct: &mr,
Duration: time.Since(start),
}
if errSend != nil {
er.Error = errSend.Error()
}
if subrt != nil { if subrt != nil {
er.Subcalls = subrt.internalExecutions
rt.numActorsCreated = subrt.numActorsCreated rt.numActorsCreated = subrt.numActorsCreated
} }
rt.internalExecutions = append(rt.internalExecutions, &er) rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace)
return ret, errSend return ret, errSend
} }
func (rs *Runtime) State() vmr.StateHandle { func (rt *Runtime) State() vmr.StateHandle {
return &shimStateHandle{rs: rs} return &shimStateHandle{rt: rt}
} }
type shimStateHandle struct { type shimStateHandle struct {
rs *Runtime rt *Runtime
} }
func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) { func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
c := ssh.rs.Put(obj) c := ssh.rt.Put(obj)
ssh.rs.stateCommit(EmptyObjectCid, c) // TODO: handle error below
ssh.rt.stateCommit(EmptyObjectCid, c)
} }
func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) { func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
act, err := ssh.rs.state.GetActor(ssh.rs.Message().Receiver()) act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
if err != nil { if err != nil {
ssh.rs.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err) ssh.rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err)
} }
ssh.rs.Get(act.Head, obj) ssh.rt.Get(act.Head, obj)
} }
func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func() interface{}) interface{} { func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func() interface{}) interface{} {
if obj == nil { if obj == nil {
ssh.rs.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()") ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()")
} }
act, err := ssh.rs.state.GetActor(ssh.rs.Message().Receiver()) act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver())
if err != nil { if err != nil {
ssh.rs.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err) ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err)
} }
baseState := act.Head baseState := act.Head
ssh.rs.Get(baseState, obj) ssh.rt.Get(baseState, obj)
ssh.rs.allowInternal = false ssh.rt.allowInternal = false
out := f() out := f()
ssh.rs.allowInternal = true ssh.rt.allowInternal = true
c := ssh.rs.Put(obj) c := ssh.rt.Put(obj)
ssh.rs.stateCommit(baseState, c) // TODO: handle error below
ssh.rt.stateCommit(baseState, c)
return out return out
} }
@ -501,22 +492,78 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
return nil return nil
} }
func (rt *Runtime) ChargeGas(toUse int64) { func (rt *Runtime) finilizeGasTracing() {
err := rt.chargeGasSafe(toUse) if rt.lastGasCharge != nil {
rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
}
}
// ChargeGas is spec actors function
func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) {
err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1)
if err != nil { if err != nil {
panic(err) panic(err)
} }
} }
func (rt *Runtime) chargeGasSafe(toUse int64) aerrors.ActorError { func (rt *Runtime) chargeGas(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1)
if err != nil {
panic(err)
}
}
func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) {
return func(gas GasCharge) {
err := rt.chargeGasInternal(gas, 1+skip)
if err != nil {
panic(err)
}
}
}
func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError {
toUse := gas.Total()
var callers [10]uintptr
cout := gruntime.Callers(2+skip, callers[:])
now := time.Now()
if rt.lastGasCharge != nil {
rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime)
}
gasTrace := types.GasTrace{
Name: gas.Name,
Extra: gas.Extra,
TotalGas: toUse,
ComputeGas: gas.ComputeGas,
StorageGas: gas.StorageGas,
TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti,
VirtualComputeGas: gas.VirtualCompute,
VirtualStorageGas: gas.VirtualStorage,
Callers: callers[:cout],
}
rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace)
rt.lastGasChargeTime = now
rt.lastGasCharge = &gasTrace
if rt.gasUsed+toUse > rt.gasAvailable { if rt.gasUsed+toUse > rt.gasAvailable {
rt.gasUsed = rt.gasAvailable rt.gasUsed = rt.gasAvailable
return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d", rt.gasUsed, rt.gasAvailable) return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d",
rt.gasUsed, rt.gasAvailable)
} }
rt.gasUsed += toUse rt.gasUsed += toUse
return nil return nil
} }
func (rt *Runtime) chargeGasSafe(gas GasCharge) aerrors.ActorError {
return rt.chargeGasInternal(gas, 1)
}
func (rt *Runtime) Pricelist() Pricelist { func (rt *Runtime) Pricelist() Pricelist {
return rt.pricelist return rt.pricelist
} }
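chargeGasInternal records every charge as a types.GasTrace, stamps it with the caller program counters and the time elapsed since the previous charge, and appends it to rt.executionTrace. A rough sketch of the trace structures implied by the fields used here follows; the authoritative definitions live in chain/types and may differ:

// Sketch of the trace types as used by the runtime above; field names are
// taken from this diff, everything else is assumed.
package types

import "time"

type GasTrace struct {
	Name  string
	Extra interface{}

	TotalGas   int64
	ComputeGas int64
	StorageGas int64

	TotalVirtualGas   int64
	VirtualComputeGas int64
	VirtualStorageGas int64

	Callers   []uintptr     // program counters of the charging call site
	TimeTaken time.Duration // set when the next charge, or finalization, happens
}

type ExecutionTrace struct {
	Msg      *Message
	MsgRct   *MessageReceipt
	Error    string
	Duration time.Duration

	GasCharges []*GasTrace
	Subcalls   []ExecutionTrace
}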
View File
@ -4,6 +4,8 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
goruntime "runtime"
"sync"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -41,7 +43,7 @@ type syscallShim struct {
verifier ffiwrapper.Verifier verifier ffiwrapper.Verifier
} }
func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) { func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
var sum abi.PaddedPieceSize var sum abi.PaddedPieceSize
for _, p := range pieces { for _, p := range pieces {
sum += p.Size sum += p.Size
@ -113,7 +115,7 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen
// (b) time-offset mining fault // (b) time-offset mining fault
// strictly speaking no need to compare heights based on double fork mining check above, // strictly speaking no need to compare heights based on double fork mining check above,
// but at same height this would be a different fault. // but at same height this would be a different fault.
if !types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
consensusFault = &runtime.ConsensusFault{ consensusFault = &runtime.ConsensusFault{
Target: blockA.Miner, Target: blockA.Miner,
Epoch: blockB.Height, Epoch: blockB.Height,
@ -157,7 +159,7 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.Consen
} }
if sigErr := ss.VerifyBlockSig(&blockB); sigErr != nil { if sigErr := ss.VerifyBlockSig(&blockB); sigErr != nil {
return nil, xerrors.Errorf("cannot verify first block sig: %w", sigErr) return nil, xerrors.Errorf("cannot verify second block sig: %w", sigErr)
} }
return consensusFault, nil return consensusFault, nil
@ -183,7 +185,7 @@ func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
return err return err
} }
if err := sigs.CheckBlockSignature(blk, ss.ctx, waddr); err != nil { if err := sigs.CheckBlockSignature(ss.ctx, blk, waddr); err != nil {
return err return err
} }
@ -201,20 +203,6 @@ func (ss *syscallShim) VerifyPoSt(proof abi.WindowPoStVerifyInfo) error {
return nil return nil
} }
func cidToCommD(c cid.Cid) [32]byte {
b := c.Bytes()
var out [32]byte
copy(out[:], b[len(b)-32:])
return out
}
func cidToCommR(c cid.Cid) [32]byte {
b := c.Bytes()
var out [32]byte
copy(out[:], b[len(b)-32:])
return out
}
func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error { func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error {
//_, span := trace.StartSpan(ctx, "ValidatePoRep") //_, span := trace.StartSpan(ctx, "ValidatePoRep")
//defer span.End() //defer span.End()
@ -225,7 +213,7 @@ func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error {
} }
ticket := []byte(info.Randomness) ticket := []byte(info.Randomness)
proof := []byte(info.Proof) proof := info.Proof
seed := []byte(info.InteractiveRandomness) seed := []byte(info.InteractiveRandomness)
log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
@ -252,3 +240,37 @@ func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Addres
return sigs.Verify(&sig, kaddr, input) return sigs.Verify(&sig, kaddr, input)
} }
var BatchSealVerifyParallelism = goruntime.NumCPU()
func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
out := make(map[address.Address][]bool)
sema := make(chan struct{}, BatchSealVerifyParallelism)
var wg sync.WaitGroup
for addr, seals := range inp {
results := make([]bool, len(seals))
out[addr] = results
for i, s := range seals {
wg.Add(1)
go func(ma address.Address, ix int, svi abi.SealVerifyInfo, res []bool) {
defer wg.Done()
sema <- struct{}{}
if err := ss.VerifySeal(svi); err != nil {
log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err)
res[ix] = false
} else {
res[ix] = true
}
<-sema
}(addr, i, s, results)
}
}
wg.Wait()
return out, nil
}
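BatchVerifySeals fans the proofs out to a bounded pool of goroutines (at most BatchSealVerifyParallelism in flight, enforced by the buffered channel acting as a semaphore) and reports one boolean per seal, indexed in submission order. A hypothetical caller-side sketch of consuming the result map, not part of this commit:

// "batch" maps each miner address to the SealVerifyInfos it submitted.
results, err := ss.BatchVerifySeals(batch)
if err != nil {
	return err
}
for miner, oks := range results {
	for i, ok := range oks {
		if !ok {
			log.Warnw("seal proof rejected", "miner", miner, "index", i)
		}
	}
}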
View File
@ -41,6 +41,7 @@ func init() {
func TestChainValidationMessageSuite(t *testing.T) { func TestChainValidationMessageSuite(t *testing.T) {
f := factory.NewFactories() f := factory.NewFactories()
for _, testCase := range suites.MessageTestCases() { for _, testCase := range suites.MessageTestCases() {
testCase := testCase
if TestSuiteSkipper.Skip(testCase) { if TestSuiteSkipper.Skip(testCase) {
continue continue
} }
@ -53,6 +54,7 @@ func TestChainValidationMessageSuite(t *testing.T) {
func TestChainValidationTipSetSuite(t *testing.T) { func TestChainValidationTipSetSuite(t *testing.T) {
f := factory.NewFactories() f := factory.NewFactories()
for _, testCase := range suites.TipSetTestCases() { for _, testCase := range suites.TipSetTestCases() {
testCase := testCase
if TestSuiteSkipper.Skip(testCase) { if TestSuiteSkipper.Skip(testCase) {
continue continue
} }
View File
@ -35,6 +35,7 @@ import (
) )
var log = logging.Logger("vm") var log = logging.Logger("vm")
var gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
// ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`. // ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`.
func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, aerrors.ActorError) { func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, aerrors.ActorError) {
@ -62,17 +63,19 @@ func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Ad
var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil)
type gasChargingBlocks struct { type gasChargingBlocks struct {
chargeGas func(int64) chargeGas func(GasCharge)
pricelist Pricelist pricelist Pricelist
under cbor.IpldBlockstore under cbor.IpldBlockstore
} }
func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) {
bs.chargeGas(newGasCharge("OnIpldGetStart", 0, 0))
blk, err := bs.under.Get(c) blk, err := bs.under.Get(c)
if err != nil { if err != nil {
return nil, aerrors.Escalate(err, "failed to get block from blockstore") return nil, aerrors.Escalate(err, "failed to get block from blockstore")
} }
bs.chargeGas(bs.pricelist.OnIpldGet(len(blk.RawData()))) bs.chargeGas(bs.pricelist.OnIpldGet(len(blk.RawData())))
bs.chargeGas(gasOnActorExec)
return blk, nil return blk, nil
} }
@ -83,6 +86,7 @@ func (bs *gasChargingBlocks) Put(blk block.Block) error {
if err := bs.under.Put(blk); err != nil { if err := bs.under.Put(blk); err != nil {
return aerrors.Escalate(err, "failed to write data to disk") return aerrors.Escalate(err, "failed to write data to disk")
} }
bs.chargeGas(gasOnActorExec)
return nil return nil
} }
@ -102,15 +106,16 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
pricelist: PricelistByEpoch(vm.blockHeight), pricelist: PricelistByEpoch(vm.blockHeight),
allowInternal: true, allowInternal: true,
callerValidated: false, callerValidated: false,
executionTrace: types.ExecutionTrace{Msg: msg},
} }
rt.cst = &cbor.BasicIpldStore{ rt.cst = &cbor.BasicIpldStore{
Blocks: &gasChargingBlocks{rt.ChargeGas, rt.pricelist, vm.cst.Blocks}, Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks},
Atlas: vm.cst.Atlas, Atlas: vm.cst.Atlas,
} }
rt.sys = pricedSyscalls{ rt.sys = pricedSyscalls{
under: vm.Syscalls, under: vm.Syscalls,
chargeGas: rt.ChargeGas, chargeGas: rt.chargeGasFunc(1),
pl: rt.pricelist, pl: rt.pricelist,
} }
@ -131,7 +136,7 @@ type VM struct {
cst *cbor.BasicIpldStore cst *cbor.BasicIpldStore
buf *bufbstore.BufferedBS buf *bufbstore.BufferedBS
blockHeight abi.ChainEpoch blockHeight abi.ChainEpoch
inv *invoker inv *Invoker
rand Rand rand Rand
Syscalls runtime.Syscalls Syscalls runtime.Syscalls
@ -165,34 +170,48 @@ type ApplyRet struct {
types.MessageReceipt types.MessageReceipt
ActorErr aerrors.ActorError ActorErr aerrors.ActorError
Penalty types.BigInt Penalty types.BigInt
InternalExecutions []*types.ExecutionResult ExecutionTrace types.ExecutionTrace
Duration time.Duration Duration time.Duration
} }
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
gasCharge int64) ([]byte, aerrors.ActorError, *Runtime) { gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
st := vm.cstate st := vm.cstate
gasUsed := gasCharge
origin := msg.From origin := msg.From
on := msg.Nonce on := msg.Nonce
var nac uint64 = 0 var nac uint64 = 0
var gasUsed int64
if parent != nil { if parent != nil {
gasUsed = parent.gasUsed + gasUsed gasUsed = parent.gasUsed
origin = parent.origin origin = parent.origin
on = parent.originNonce on = parent.originNonce
nac = parent.numActorsCreated nac = parent.numActorsCreated
} }
rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac) rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac)
rt.lastGasChargeTime = start
if parent != nil { if parent != nil {
rt.lastGasChargeTime = parent.lastGasChargeTime
rt.lastGasCharge = parent.lastGasCharge
defer func() { defer func() {
parent.gasUsed = rt.gasUsed parent.gasUsed = rt.gasUsed
parent.lastGasChargeTime = rt.lastGasChargeTime
parent.lastGasCharge = rt.lastGasCharge
}() }()
} }
if gasCharge != nil {
if err := rt.chargeGasSafe(*gasCharge); err != nil {
// this should never happen
return nil, aerrors.Wrap(err, "not enough gas for initial message charge, this should not happen"), rt
}
}
ret, err := func() ([]byte, aerrors.ActorError) {
if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil { if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil {
return nil, aerrors.Wrap(aerr, "not enough gas for method invocation"), rt return nil, aerrors.Wrap(aerr, "not enough gas for method invocation")
} }
toActor, err := st.GetActor(msg.To) toActor, err := st.GetActor(msg.To)
@ -200,26 +219,42 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
if xerrors.Is(err, init_.ErrAddressNotFound) { if xerrors.Is(err, init_.ErrAddressNotFound) {
a, err := TryCreateAccountActor(rt, msg.To) a, err := TryCreateAccountActor(rt, msg.To)
if err != nil { if err != nil {
return nil, aerrors.Wrapf(err, "could not create account"), rt return nil, aerrors.Wrapf(err, "could not create account")
} }
toActor = a toActor = a
} else { } else {
return nil, aerrors.Escalate(err, "getting actor"), rt return nil, aerrors.Escalate(err, "getting actor")
} }
} }
if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {
if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil {
return nil, aerrors.Wrap(err, "failed to transfer funds"), nil return nil, aerrors.Wrap(err, "failed to transfer funds")
} }
} }
if msg.Method != 0 { if msg.Method != 0 {
var ret []byte
_ = rt.chargeGasSafe(gasOnActorExec)
ret, err := vm.Invoke(toActor, rt, msg.Method, msg.Params) ret, err := vm.Invoke(toActor, rt, msg.Method, msg.Params)
return ret, err, rt _ = rt.chargeGasSafe(newGasCharge("OnActorExecDone", 0, 0))
return ret, err
}
return nil, nil
}()
mr := types.MessageReceipt{
ExitCode: aerrors.RetCode(err),
Return: ret,
GasUsed: rt.gasUsed,
}
rt.executionTrace.MsgRct = &mr
rt.executionTrace.Duration = time.Since(start)
if err != nil {
rt.executionTrace.Error = err.Error()
} }
return nil, nil, rt return ret, err, rt
} }
func checkMessage(msg *types.Message) error { func checkMessage(msg *types.Message) error {
@ -243,15 +278,16 @@ func checkMessage(msg *types.Message) error {
func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
start := time.Now() start := time.Now()
ret, actorErr, rt := vm.send(ctx, msg, nil, 0) ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start)
rt.finilizeGasTracing()
return &ApplyRet{ return &ApplyRet{
MessageReceipt: types.MessageReceipt{ MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.ExitCode(aerrors.RetCode(actorErr)), ExitCode: aerrors.RetCode(actorErr),
Return: ret, Return: ret,
GasUsed: 0, GasUsed: 0,
}, },
ActorErr: actorErr, ActorErr: actorErr,
InternalExecutions: rt.internalExecutions, ExecutionTrace: rt.executionTrace,
Penalty: types.NewInt(0), Penalty: types.NewInt(0),
Duration: time.Since(start), Duration: time.Since(start),
}, actorErr }, actorErr
@ -276,7 +312,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
pl := PricelistByEpoch(vm.blockHeight) pl := PricelistByEpoch(vm.blockHeight)
msgGasCost := pl.OnChainMessage(cmsg.ChainLength()) msgGas := pl.OnChainMessage(cmsg.ChainLength())
msgGasCost := msgGas.Total()
// this should never happen, but is currently still exercised by some tests // this should never happen, but is currently still exercised by some tests
if msgGasCost > msg.GasLimit { if msgGasCost > msg.GasLimit {
return &ApplyRet{ return &ApplyRet{
@ -359,7 +396,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
} }
defer st.ClearSnapshot() defer st.ClearSnapshot()
ret, actorErr, rt := vm.send(ctx, msg, nil, msgGasCost) ret, actorErr, rt := vm.send(ctx, msg, nil, &msgGas, start)
if aerrors.IsFatal(actorErr) { if aerrors.IsFatal(actorErr) {
return nil, xerrors.Errorf("[from=%s,to=%s,n=%d,m=%d,h=%d] fatal error: %w", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr) return nil, xerrors.Errorf("[from=%s,to=%s,n=%d,m=%d,h=%d] fatal error: %w", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr)
} }
@ -413,14 +450,16 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
return nil, xerrors.Errorf("gas handling math is wrong") return nil, xerrors.Errorf("gas handling math is wrong")
} }
rt.finilizeGasTracing()
return &ApplyRet{ return &ApplyRet{
MessageReceipt: types.MessageReceipt{ MessageReceipt: types.MessageReceipt{
ExitCode: exitcode.ExitCode(errcode), ExitCode: errcode,
Return: ret, Return: ret,
GasUsed: gasUsed, GasUsed: gasUsed,
}, },
ActorErr: actorErr, ActorErr: actorErr,
InternalExecutions: rt.internalExecutions, ExecutionTrace: rt.executionTrace,
Penalty: types.NewInt(0), Penalty: types.NewInt(0),
Duration: time.Since(start), Duration: time.Since(start),
}, nil }, nil
@ -454,7 +493,7 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return root, nil return root, nil
} }
// vm.MutateState(idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) // MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...})
func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error {
act, err := vm.cstate.GetActor(addr) act, err := vm.cstate.GetActor(addr)
if err != nil { if err != nil {
@ -591,7 +630,7 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
return ret, nil return ret, nil
} }
func (vm *VM) SetInvoker(i *invoker) { func (vm *VM) SetInvoker(i *Invoker) {
vm.inv = i vm.inv = i
} }
@ -607,16 +646,30 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor
return nil return nil
} }
fromID, err := vm.cstate.LookupID(from)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
}
toID, err := vm.cstate.LookupID(to)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
}
if fromID == toID {
return nil
}
if amt.LessThan(types.NewInt(0)) { if amt.LessThan(types.NewInt(0)) {
return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt)
} }
f, err := vm.cstate.GetActor(from) f, err := vm.cstate.GetActor(fromID)
if err != nil { if err != nil {
return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err)
} }
t, err := vm.cstate.GetActor(to) t, err := vm.cstate.GetActor(toID)
if err != nil { if err != nil {
return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err)
} }
@ -626,11 +679,11 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor
} }
depositFunds(t, amt) depositFunds(t, amt)
if err := vm.cstate.SetActor(from, f); err != nil { if err := vm.cstate.SetActor(fromID, f); err != nil {
return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err)
} }
if err := vm.cstate.SetActor(to, t); err != nil { if err := vm.cstate.SetActor(toID, t); err != nil {
return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) return aerrors.Fatalf("transfer failed when setting sender actor: %s", err)
} }
View File
@ -12,8 +12,8 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
_ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
_ "github.com/filecoin-project/lotus/lib/sigs/secp" _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/lib/sigs"
View File
@ -3,8 +3,8 @@ package cli
import ( import (
"fmt" "fmt"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-jsonrpc/auth"
View File
@ -22,9 +22,9 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin/power" "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -221,6 +221,9 @@ var chainStatObjCmd = &cli.Command{
base := cid.Undef base := cid.Undef
if cctx.IsSet("base") { if cctx.IsSet("base") {
base, err = cid.Decode(cctx.String("base")) base, err = cid.Decode(cctx.String("base"))
if err != nil {
return err
}
} }
stats, err := api.ChainStatObj(ctx, obj, base) stats, err := api.ChainStatObj(ctx, obj, base)
@ -442,6 +445,7 @@ var chainGetCmd = &cli.Command{
- /ipfs/[cid]/@Hu:123 - get uvarint elem 123 from hamt - /ipfs/[cid]/@Hu:123 - get uvarint elem 123 from hamt
- /ipfs/[cid]/@Ha:t01 - get element under Addr(t01).Bytes - /ipfs/[cid]/@Ha:t01 - get element under Addr(t01).Bytes
- /ipfs/[cid]/@A:10 - get 10th amt element - /ipfs/[cid]/@A:10 - get 10th amt element
- .../@Ha:t01/@state - get pretty map-based actor state
List of --as-type types: List of --as-type types:
- raw - raw
@ -803,7 +807,12 @@ var chainExportCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
defer fi.Close() defer func() {
err := fi.Close()
if err != nil {
fmt.Printf("error closing output file: %+v", err)
}
}()
ts, err := LoadTipSet(ctx, cctx, api) ts, err := LoadTipSet(ctx, cctx, api)
if err != nil { if err != nil {
@ -835,6 +844,10 @@ var slashConsensusFault = &cli.Command{
Name: "miner", Name: "miner",
Usage: "Miner address", Usage: "Miner address",
}, },
&cli.StringFlag{
Name: "extra",
Usage: "Extra block cid",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
@ -879,10 +892,34 @@ var slashConsensusFault = &cli.Command{
return err return err
} }
params, err := actors.SerializeParams(&miner.ReportConsensusFaultParams{ params := miner.ReportConsensusFaultParams{
BlockHeader1: bh1, BlockHeader1: bh1,
BlockHeader2: bh2, BlockHeader2: bh2,
}) }
if cctx.String("extra") != "" {
cExtra, err := cid.Parse(cctx.String("extra"))
if err != nil {
return xerrors.Errorf("parsing cid extra: %w", err)
}
bExtra, err := api.ChainGetBlock(ctx, cExtra)
if err != nil {
return xerrors.Errorf("getting block extra: %w", err)
}
be, err := cborutil.Dump(bExtra)
if err != nil {
return err
}
params.BlockHeaderExtra = be
}
enc, err := actors.SerializeParams(&params)
if err != nil {
return err
}
if cctx.String("miner") == "" { if cctx.String("miner") == "" {
return xerrors.Errorf("--miner flag is required") return xerrors.Errorf("--miner flag is required")
@ -900,7 +937,7 @@ var slashConsensusFault = &cli.Command{
GasPrice: types.NewInt(1), GasPrice: types.NewInt(1),
GasLimit: 10000000, GasLimit: 10000000,
Method: builtin.MethodsMiner.ReportConsensusFault, Method: builtin.MethodsMiner.ReportConsensusFault,
Params: params, Params: enc,
} }
smsg, err := api.MpoolPushMessage(ctx, msg) smsg, err := api.MpoolPushMessage(ctx, msg)
View File
@ -11,14 +11,15 @@ import (
"github.com/ipfs/go-cidutil/cidenc" "github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multibase" "github.com/multiformats/go-multibase"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/lotus/api"
lapi "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
@ -391,6 +392,10 @@ var clientRetrieveCmd = &cli.Command{
Name: "car", Name: "car",
Usage: "export to a car file instead of a regular file", Usage: "export to a car file instead of a regular file",
}, },
&cli.StringFlag{
Name: "miner",
Usage: "miner address for retrieval, if not present it'll use local discovery",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 { if cctx.NArg() != 2 {
@ -398,7 +403,7 @@ var clientRetrieveCmd = &cli.Command{
return nil return nil
} }
api, closer, err := GetFullNodeAPI(cctx) fapi, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
} }
@ -409,7 +414,7 @@ var clientRetrieveCmd = &cli.Command{
if cctx.String("address") != "" { if cctx.String("address") != "" {
payer, err = address.NewFromString(cctx.String("address")) payer, err = address.NewFromString(cctx.String("address"))
} else { } else {
payer, err = api.WalletDefaultAddress(ctx) payer, err = fapi.WalletDefaultAddress(ctx)
} }
if err != nil { if err != nil {
return err return err
@ -432,23 +437,39 @@ var clientRetrieveCmd = &cli.Command{
return nil return nil
}*/ // TODO: fix }*/ // TODO: fix
offers, err := api.ClientFindData(ctx, file) var offer api.QueryOffer
minerStrAddr := cctx.String("miner")
if minerStrAddr == "" { // Local discovery
offers, err := fapi.ClientFindData(ctx, file)
if err != nil { if err != nil {
return err return err
} }
// TODO: parse offer strings from `client find`, make this smarter // TODO: parse offer strings from `client find`, make this smarter
if len(offers) < 1 { if len(offers) < 1 {
fmt.Println("Failed to find file") fmt.Println("Failed to find file")
return nil return nil
} }
offer = offers[0]
} else { // Directed retrieval
minerAddr, err := address.NewFromString(minerStrAddr)
if err != nil {
return err
}
offer, err = fapi.ClientMinerQueryOffer(ctx, file, minerAddr)
if err != nil {
return err
}
}
if offer.Err != "" {
return fmt.Errorf("The received offer errored: %s", offer.Err)
}
ref := &lapi.FileRef{ ref := &lapi.FileRef{
Path: cctx.Args().Get(1), Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"), IsCAR: cctx.Bool("car"),
} }
if err := api.ClientRetrieve(ctx, offers[0].Order(payer), ref); err != nil { if err := fapi.ClientRetrieve(ctx, offer.Order(payer), ref); err != nil {
return xerrors.Errorf("Retrieval Failed: %w", err) return xerrors.Errorf("Retrieval Failed: %w", err)
} }
@ -506,11 +527,11 @@ var clientQueryAskCmd = &cli.Command{
return xerrors.Errorf("failed to get peerID for miner: %w", err) return xerrors.Errorf("failed to get peerID for miner: %w", err)
} }
if mi.PeerId == peer.ID("SETME") { if peer.ID(mi.PeerId) == peer.ID("SETME") {
return fmt.Errorf("the miner hasn't initialized yet") return fmt.Errorf("the miner hasn't initialized yet")
} }
pid = mi.PeerId pid = peer.ID(mi.PeerId)
} }
ask, err := api.ClientQueryAsk(ctx, pid, maddr) ask, err := api.ClientQueryAsk(ctx, pid, maddr)
View File
@ -13,8 +13,8 @@ import (
"github.com/mitchellh/go-homedir" "github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net" manet "github.com/multiformats/go-multiaddr-net"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc"
View File
@ -3,8 +3,8 @@ package cli
import ( import (
"fmt" "fmt"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
) )
var logCmd = &cli.Command{ var logCmd = &cli.Command{
View File
@ -5,8 +5,8 @@ import (
"fmt" "fmt"
"sort" "sort"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
View File
@ -17,12 +17,13 @@ import (
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld" "github.com/ipfs/go-hamt-ipld"
cbor "github.com/ipfs/go-ipld-cbor" cbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/build"
types "github.com/filecoin-project/lotus/chain/types" types "github.com/filecoin-project/lotus/chain/types"
) )
@ -117,7 +118,7 @@ var msigCreateCmd = &cli.Command{
} }
// wait for it to get mined into a block // wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil { if err != nil {
return err return err
} }
@ -206,7 +207,10 @@ var msigInspectCmd = &cli.Command{
tx := pending[txid] tx := pending[txid]
fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%d\t%x\n", txid, state(tx), tx.To, types.FIL(tx.Value), tx.Method, tx.Params) fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%d\t%x\n", txid, state(tx), tx.To, types.FIL(tx.Value), tx.Method, tx.Params)
} }
w.Flush() if err := w.Flush(); err != nil {
return xerrors.Errorf("flushing output: %+v", err)
}
} }
return nil return nil
@ -333,7 +337,7 @@ var msigProposeCmd = &cli.Command{
fmt.Println("send proposal in message: ", msgCid) fmt.Println("send proposal in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil { if err != nil {
return err return err
} }
@ -449,7 +453,7 @@ var msigApproveCmd = &cli.Command{
fmt.Println("sent approval in message: ", msgCid) fmt.Println("sent approval in message: ", msgCid)
wait, err := api.StateWaitMsg(ctx, msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
if err != nil { if err != nil {
return err return err
} }
View File
@ -7,7 +7,7 @@ import (
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
"gopkg.in/urfave/cli.v2" "github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/lib/addrutil" "github.com/filecoin-project/lotus/lib/addrutil"
) )
@ -21,6 +21,7 @@ var netCmd = &cli.Command{
netListen, netListen,
netId, netId,
netFindPeer, netFindPeer,
netScores,
}, },
} }
@ -44,7 +45,30 @@ var netPeers = &cli.Command{
}) })
for _, peer := range peers { for _, peer := range peers {
fmt.Println(peer) fmt.Printf("%s, %s\n", peer.ID, peer.Addrs)
}
return nil
},
}
var netScores = &cli.Command{
Name: "scores",
Usage: "Print peers' pubsub scores",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
scores, err := api.NetPubsubScores(ctx)
if err != nil {
return err
}
for _, peer := range scores {
fmt.Printf("%s, %f\n", peer.ID, peer.Score)
} }
return nil return nil
View File
@ -3,8 +3,8 @@ package cli
import ( import (
"github.com/docker/go-units" "github.com/docker/go-units"
paramfetch "github.com/filecoin-project/go-paramfetch" paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
) )
@ -12,20 +12,18 @@ import (
var fetchParamCmd = &cli.Command{ var fetchParamCmd = &cli.Command{
Name: "fetch-params", Name: "fetch-params",
Usage: "Fetch proving parameters", Usage: "Fetch proving parameters",
Flags: []cli.Flag{ ArgsUsage: "[sectorSize]",
&cli.StringFlag{
Name: "proving-params",
Usage: "download params used creating proofs for given size, i.e. 32GiB",
},
},
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
sectorSizeInt, err := units.RAMInBytes(cctx.String("proving-params")) if !cctx.Args().Present() {
return xerrors.Errorf("must pass sector size to fetch params for (specify as \"32GiB\", for instance)")
}
sectorSizeInt, err := units.RAMInBytes(cctx.Args().First())
if err != nil { if err != nil {
return err return xerrors.Errorf("error parsing sector size (specify as \"32GiB\", for instance): %w", err)
} }
sectorSize := uint64(sectorSizeInt) sectorSize := uint64(sectorSizeInt)
err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJson(), sectorSize) err = paramfetch.GetParams(ReqContext(cctx), build.ParametersJSON(), sectorSize)
if err != nil { if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err) return xerrors.Errorf("fetching proof parameters: %w", err)
} }
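With the flag gone, the invocation becomes `lotus fetch-params 32GiB` (a positional, human-readable size). A small sketch of the same argument handling outside the cli framework; it assumes only the github.com/docker/go-units dependency already imported above:

package main

import (
    "fmt"
    "os"

    "github.com/docker/go-units"
)

func main() {
    if len(os.Args) < 2 {
        fmt.Fprintln(os.Stderr, `must pass sector size to fetch params for (specify as "32GiB", for instance)`)
        os.Exit(1)
    }
    // RAMInBytes accepts suffixed sizes like "32GiB" as well as plain byte counts.
    sectorSizeInt, err := units.RAMInBytes(os.Args[1])
    if err != nil {
        fmt.Fprintf(os.Stderr, "error parsing sector size: %v\n", err)
        os.Exit(1)
    }
    fmt.Printf("would fetch proof parameters for %d-byte sectors\n", uint64(sectorSizeInt))
}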

View File

@ -4,10 +4,11 @@ import (
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/builtin/paych" "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"gopkg.in/urfave/cli.v2" "github.com/urfave/cli/v2"
types "github.com/filecoin-project/lotus/chain/types" types "github.com/filecoin-project/lotus/chain/types"
) )
@ -361,7 +362,7 @@ var paychVoucherSubmitCmd = &cli.Command{
return err return err
} }
mwait, err := api.StateWaitMsg(ctx, mcid) mwait, err := api.StateWaitMsg(ctx, mcid, build.MessageConfidence)
if err != nil { if err != nil {
return err return err
} }
@ -370,7 +371,7 @@ var paychVoucherSubmitCmd = &cli.Command{
return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode) return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode)
} }
fmt.Println("channel updated succesfully") fmt.Println("channel updated successfully")
return nil return nil
}, },
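The recurring change in this file and the others in this diff is StateWaitMsg gaining a confidence argument: callers now state how many epochs past inclusion they want before trusting the receipt. A hedged, self-contained sketch of the resulting call shape; fullNode, msgLookup and the confidence value are stand-ins, not the lotus API:

package main

import (
    "context"
    "fmt"
)

const messageConfidence uint64 = 5 // assumed stand-in for build.MessageConfidence

type msgLookup struct {
    ExitCode int64 // simplified; the real lookup carries the full receipt and tipset
}

type fullNode interface {
    StateWaitMsg(ctx context.Context, msgCid string, confidence uint64) (*msgLookup, error)
}

// waitAndCheck mirrors the pattern above: wait with explicit confidence,
// then fail if the message did not execute successfully.
func waitAndCheck(ctx context.Context, api fullNode, msgCid string) error {
    mwait, err := api.StateWaitMsg(ctx, msgCid, messageConfidence)
    if err != nil {
        return err
    }
    if mwait.ExitCode != 0 {
        return fmt.Errorf("message execution failed (exit code %d)", mwait.ExitCode)
    }
    return nil
}

func main() { fmt.Println("sketch only; waitAndCheck needs a real node API") }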

View File

@ -5,7 +5,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"gopkg.in/urfave/cli.v2" "github.com/urfave/cli/v2"
) )
var sendCmd = &cli.Command{ var sendCmd = &cli.Command{

View File

@ -5,6 +5,8 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"html/template"
"os"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
@ -14,9 +16,9 @@ import (
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multihash" "github.com/multiformats/go-multihash"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"gopkg.in/urfave/cli.v2"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
@ -31,15 +33,17 @@ import (
"github.com/filecoin-project/specs-actors/actors/builtin/power" "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/reward" "github.com/filecoin-project/specs-actors/actors/builtin/reward"
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg" "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/miner"
) )
type methodMeta struct { type methodMeta struct {
name string Name string
params reflect.Type params reflect.Type
ret reflect.Type ret reflect.Type
@ -67,7 +71,7 @@ func init() {
nf := rt.NumField() nf := rt.NumField()
methods[c] = append(methods[c], methodMeta{ methods[c] = append(methods[c], methodMeta{
name: "Send", Name: "Send",
params: reflect.TypeOf(new(adt.EmptyValue)), params: reflect.TypeOf(new(adt.EmptyValue)),
ret: reflect.TypeOf(new(adt.EmptyValue)), ret: reflect.TypeOf(new(adt.EmptyValue)),
}) })
@ -77,7 +81,7 @@ func init() {
export := reflect.TypeOf(exports[i+1]) export := reflect.TypeOf(exports[i+1])
methods[c] = append(methods[c], methodMeta{ methods[c] = append(methods[c], methodMeta{
name: rt.Field(i).Name, Name: rt.Field(i).Name,
params: export.In(1), params: export.In(1),
ret: export.Out(0), ret: export.Out(0),
}) })
@ -143,23 +147,11 @@ var stateMinerInfo = &cli.Command{
return err return err
} }
act, err := api.StateGetActor(ctx, addr, ts.Key()) mi, err := api.StateMinerInfo(ctx, addr, ts.Key())
if err != nil { if err != nil {
return err return err
} }
aso, err := api.ChainReadObj(ctx, act.Head)
if err != nil {
return err
}
var mst miner2.State
if err := mst.UnmarshalCBOR(bytes.NewReader(aso)); err != nil {
return err
}
mi := mst.Info
fmt.Printf("Owner:\t%s\n", mi.Owner) fmt.Printf("Owner:\t%s\n", mi.Owner)
fmt.Printf("Worker:\t%s\n", mi.Worker) fmt.Printf("Worker:\t%s\n", mi.Worker)
fmt.Printf("PeerID:\t%s\n", mi.PeerId) fmt.Printf("PeerID:\t%s\n", mi.PeerId)
@ -359,7 +351,7 @@ var stateReplaySetCmd = &cli.Command{
return fmt.Errorf("message cid was invalid: %s", err) return fmt.Errorf("message cid was invalid: %s", err)
} }
api, closer, err := GetFullNodeAPI(cctx) fapi, closer, err := GetFullNodeAPI(cctx)
if err != nil { if err != nil {
return err return err
} }
@ -381,7 +373,7 @@ var stateReplaySetCmd = &cli.Command{
if len(tscids) > 0 { if len(tscids) > 0 {
var headers []*types.BlockHeader var headers []*types.BlockHeader
for _, c := range tscids { for _, c := range tscids {
h, err := api.ChainGetBlock(ctx, c) h, err := fapi.ChainGetBlock(ctx, c)
if err != nil { if err != nil {
return err return err
} }
@ -391,12 +383,13 @@ var stateReplaySetCmd = &cli.Command{
ts, err = types.NewTipSet(headers) ts, err = types.NewTipSet(headers)
} else { } else {
r, err := api.StateWaitMsg(ctx, mcid) var r *api.MsgLookup
r, err = fapi.StateWaitMsg(ctx, mcid, build.MessageConfidence)
if err != nil { if err != nil {
return xerrors.Errorf("finding message in chain: %w", err) return xerrors.Errorf("finding message in chain: %w", err)
} }
ts, err = api.ChainGetTipSet(ctx, r.TipSet.Parents()) ts, err = fapi.ChainGetTipSet(ctx, r.TipSet.Parents())
} }
if err != nil { if err != nil {
return err return err
@ -404,7 +397,7 @@ var stateReplaySetCmd = &cli.Command{
} }
res, err := api.StateReplay(ctx, ts.Key(), mcid) res, err := fapi.StateReplay(ctx, ts.Key(), mcid)
if err != nil { if err != nil {
return xerrors.Errorf("replay call failed: %w", err) return xerrors.Errorf("replay call failed: %w", err)
} }
@ -749,12 +742,7 @@ var stateReadStateCmd = &cli.Command{
return err return err
} }
act, err := api.StateGetActor(ctx, addr, ts.Key()) as, err := api.StateReadState(ctx, addr, ts.Key())
if err != nil {
return err
}
as, err := api.StateReadState(ctx, act, ts.Key())
if err != nil { if err != nil {
return err return err
} }
@ -934,37 +922,29 @@ var stateComputeStateCmd = &cli.Command{
return c.Code, nil return c.Code, nil
} }
return computeStateHtml(ts, stout, getCode) return computeStateHTMLTempl(ts, stout, getCode)
} }
fmt.Println("computed state cid: ", stout.Root) fmt.Println("computed state cid: ", stout.Root)
if cctx.Bool("show-trace") { if cctx.Bool("show-trace") {
for _, ir := range stout.Trace { for _, ir := range stout.Trace {
fmt.Printf("%s\t%s\t%s\t%d\t%x\t%d\t%x\n", ir.Msg.From, ir.Msg.To, ir.Msg.Value, ir.Msg.Method, ir.Msg.Params, ir.MsgRct.ExitCode, ir.MsgRct.Return) fmt.Printf("%s\t%s\t%s\t%d\t%x\t%d\t%x\n", ir.Msg.From, ir.Msg.To, ir.Msg.Value, ir.Msg.Method, ir.Msg.Params, ir.MsgRct.ExitCode, ir.MsgRct.Return)
printInternalExecutions("\t", ir.InternalExecutions) printInternalExecutions("\t", ir.ExecutionTrace.Subcalls)
} }
} }
return nil return nil
}, },
} }
func printInternalExecutions(prefix string, trace []*types.ExecutionResult) { func printInternalExecutions(prefix string, trace []types.ExecutionTrace) {
for _, im := range trace { for _, im := range trace {
fmt.Printf("%s%s\t%s\t%s\t%d\t%x\t%d\t%x\n", prefix, im.Msg.From, im.Msg.To, im.Msg.Value, im.Msg.Method, im.Msg.Params, im.MsgRct.ExitCode, im.MsgRct.Return) fmt.Printf("%s%s\t%s\t%s\t%d\t%x\t%d\t%x\n", prefix, im.Msg.From, im.Msg.To, im.Msg.Value, im.Msg.Method, im.Msg.Params, im.MsgRct.ExitCode, im.MsgRct.Return)
printInternalExecutions(prefix+"\t", im.Subcalls) printInternalExecutions(prefix+"\t", im.Subcalls)
} }
} }
func codeStr(c cid.Cid) string { var compStateTemplate = `
cmh, err := multihash.Decode(c.Hash()) <html>
if err != nil {
panic(err)
}
return string(cmh.Digest)
}
func computeStateHtml(ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error {
fmt.Printf(`<html>
<head> <head>
<style> <style>
html, body { font-family: monospace; } html, body { font-family: monospace; }
@ -986,123 +966,259 @@ func computeStateHtml(ts *types.TipSet, o *api.ComputeStateOutput, getCode func(
} }
.slow-true-false { color: #660; } .slow-true-false { color: #660; }
.slow-true-true { color: #f80; } .slow-true-true { color: #f80; }
.deemp { color: #444; }
table {
font-size: 12px;
border-collapse: collapse;
}
tr {
border-top: 1px solid black;
border-bottom: 1px solid black;
}
tr.sum { border-top: 2px solid black; }
tr:first-child { border-top: none; }
tr:last-child { border-bottom: none; }
.ellipsis-content,
.ellipsis-toggle input {
display: none;
}
.ellipsis-toggle {
cursor: pointer;
}
/**
Checked State
**/
.ellipsis-toggle input:checked + .ellipsis {
display: none;
}
.ellipsis-toggle input:checked ~ .ellipsis-content {
display: inline;
background-color: #ddd;
}
hr {
border: none;
height: 1px;
background-color: black;
margin: 0;
}
</style> </style>
</head> </head>
<body> <body>
<div>Tipset: <b>%s</b></div> <div>Tipset: <b>{{.TipSet.Key}}</b></div>
<div>Height: %d</div> <div>Epoch: {{.TipSet.Height}}</div>
<div>State CID: <b>%s</b></div> <div>State CID: <b>{{.Comp.Root}}</b></div>
<div>Calls</div>`, ts.Key(), ts.Height(), o.Root) <div>Calls</div>
{{range .Comp.Trace}}
{{template "message" (Call .ExecutionTrace false .Msg.Cid.String)}}
{{end}}
</body>
</html>
`
for _, ir := range o.Trace { var compStateMsg = `
toCode, err := getCode(ir.Msg.To) <div class="exec" id="{{.Hash}}">
if err != nil { {{$code := GetCode .Msg.To}}
return xerrors.Errorf("getting code for %s: %w", toCode, err) <div>
} <a href="#{{.Hash}}">
{{if not .Subcall}}
<h2 class="call">
{{else}}
<h4 class="call">
{{end}}
{{- CodeStr $code}}:{{GetMethod ($code) (.Msg.Method)}}
{{if not .Subcall}}
</h2>
{{else}}
</h4>
{{end}}
</a>
</div>
params, err := jsonParams(toCode, ir.Msg.Method, ir.Msg.Params) <div><b>{{.Msg.From}}</b> -&gt; <b>{{.Msg.To}}</b> ({{ToFil .Msg.Value}} FIL), M{{.Msg.Method}}</div>
if err != nil { {{if not .Subcall}}<div><small>Msg CID: {{.Msg.Cid}}</small></div>{{end}}
return xerrors.Errorf("decoding params: %w", err) {{if gt (len .Msg.Params) 0}}
} <div><pre class="params">{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}</pre></div>
{{end}}
<div><span class="slow-{{IsSlow .Duration}}-{{IsVerySlow .Duration}}">Took {{.Duration}}</span>, <span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
if len(ir.Msg.Params) != 0 { {{if gt (len .MsgRct.Return) 0}}
params = `<div><pre class="params">` + params + `</pre></div>` <div><pre class="ret">{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}</pre></div>
} else { {{end}}
params = ""
}
ret, err := jsonReturn(toCode, ir.Msg.Method, ir.MsgRct.Return) {{if ne .MsgRct.ExitCode 0}}
if err != nil { <div class="error">Error: <pre>{{.Error}}</pre></div>
return xerrors.Errorf("decoding return value: %w", err) {{end}}
}
if len(ir.MsgRct.Return) == 0 { <details>
ret = "</div>" <summary>Gas Trace</summary>
} else { <table>
ret = `, Return</div><div><pre class="ret">` + ret + `</pre></div>` <tr><th>Name</th><th>Total/Compute/Storage</th><th>Time Taken</th><th>Location</th></tr>
} {{define "virt" -}}
{{- if . -}}
<span class="deemp">+({{.}})</span>
{{- end -}}
{{- end}}
slow := ir.Duration > 10*time.Millisecond {{define "gasC" -}}
veryslow := ir.Duration > 50*time.Millisecond <td>{{.TotalGas}}{{template "virt" .TotalVirtualGas }}/{{.ComputeGas}}{{template "virt" .VirtualComputeGas}}/{{.StorageGas}}{{template "virt" .VirtualStorageGas}}</td>
{{- end}}
cid := ir.Msg.Cid() {{range .GasCharges}}
<tr><td>{{.Name}}{{if .Extra}}:{{.Extra}}{{end}}</td>
{{template "gasC" .}}
<td>{{.TimeTaken}}</td>
<td>
{{ $fImp := FirstImportant .Location }}
{{ if $fImp }}
<details>
<summary>{{ $fImp }}</summary><hr />
{{ $elipOn := false }}
{{ range $index, $ele := .Location -}}
{{- if $index }}<br />{{end -}}
{{- if .Show -}}
{{ if $elipOn }}
{{ $elipOn = false }}
</span></label>
{{end}}
fmt.Printf(`<div class="exec" id="%s"> {{- if .Important }}<b>{{end -}}
<div><a href="#%s"><h2 class="call">%s:%s</h2></a></div> {{- . -}}
<div><b>%s</b> -&gt; <b>%s</b> (%s FIL), M%d</div> {{if .Important }}</b>{{end}}
<div><small>Msg CID: %s</small></div> {{else}}
%s {{ if not $elipOn }}
<div><span class="slow-%t-%t">Took %s</span>, <span class="exit%d">Exit: <b>%d</b></span>%s {{ $elipOn = true }}
`, cid, cid, codeStr(toCode), methods[toCode][ir.Msg.Method].name, ir.Msg.From, ir.Msg.To, types.FIL(ir.Msg.Value), ir.Msg.Method, cid, params, slow, veryslow, ir.Duration, ir.MsgRct.ExitCode, ir.MsgRct.ExitCode, ret) <label class="ellipsis-toggle"><input type="checkbox" /><span class="ellipsis">[]<br /></span>
if ir.MsgRct.ExitCode != 0 { <span class="ellipsis-content">
fmt.Printf(`<div class="error">Error: <pre>%s</pre></div>`, ir.Error) {{end}}
} {{- "" -}}
{{- . -}}
{{end}}
{{end}}
{{ if $elipOn }}
{{ $elipOn = false }}
</span></label>
{{end}}
</details>
{{end}}
</td></tr>
{{end}}
{{with SumGas .GasCharges}}
<tr class="sum"><td><b>Sum</b></td>
{{template "gasC" .}}
<td>{{.TimeTaken}}</td>
<td></td></tr>
{{end}}
</table>
</details>
if len(ir.InternalExecutions) > 0 {
fmt.Println("<div>Internal executions:</div>")
if err := printInternalExecutionsHtml(ir.InternalExecutions, getCode); err != nil {
return err
}
}
fmt.Println("</div>")
}
fmt.Printf(`</body> {{if gt (len .Subcalls) 0}}
</html>`) <div>Subcalls:</div>
return nil {{$hash := .Hash}}
{{range .Subcalls}}
{{template "message" (Call . true (printf "%s-%s" $hash .Msg.Cid.String))}}
{{end}}
{{end}}
</div>`
type compStateHTMLIn struct {
TipSet *types.TipSet
Comp *api.ComputeStateOutput
} }
func printInternalExecutionsHtml(trace []*types.ExecutionResult, getCode func(addr address.Address) (cid.Cid, error)) error { func computeStateHTMLTempl(ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error {
for _, im := range trace { t, err := template.New("compute_state").Funcs(map[string]interface{}{
toCode, err := getCode(im.Msg.To) "GetCode": getCode,
"GetMethod": getMethod,
"ToFil": toFil,
"JsonParams": jsonParams,
"JsonReturn": jsonReturn,
"IsSlow": isSlow,
"IsVerySlow": isVerySlow,
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
"SumGas": sumGas,
"CodeStr": codeStr,
"Call": call,
"FirstImportant": func(locs []types.Loc) *types.Loc {
if len(locs) != 0 {
for _, l := range locs {
if l.Important() {
return &l
}
}
return &locs[0]
}
return nil
},
}).Parse(compStateTemplate)
if err != nil { if err != nil {
return xerrors.Errorf("getting code for %s: %w", toCode, err)
}
params, err := jsonParams(toCode, im.Msg.Method, im.Msg.Params)
if err != nil {
return xerrors.Errorf("decoding params: %w", err)
}
if len(im.Msg.Params) != 0 {
params = `<div><pre class="params">` + params + `</pre></div>`
} else {
params = ""
}
ret, err := jsonReturn(toCode, im.Msg.Method, im.MsgRct.Return)
if err != nil {
return xerrors.Errorf("decoding return value: %w", err)
}
if len(im.MsgRct.Return) == 0 {
ret = "</div>"
} else {
ret = `, Return</div><div><pre class="ret">` + ret + `</pre></div>`
}
slow := im.Duration > 10*time.Millisecond
veryslow := im.Duration > 50*time.Millisecond
fmt.Printf(`<div class="exec">
<div><h4 class="call">%s:%s</h4></div>
<div><b>%s</b> -&gt; <b>%s</b> (%s FIL), M%d</div>
%s
<div><span class="slow-%t-%t">Took %s</span>, <span class="exit%d">Exit: <b>%d</b></span>%s
`, codeStr(toCode), methods[toCode][im.Msg.Method].name, im.Msg.From, im.Msg.To, types.FIL(im.Msg.Value), im.Msg.Method, params, slow, veryslow, im.Duration, im.MsgRct.ExitCode, im.MsgRct.ExitCode, ret)
if im.MsgRct.ExitCode != 0 {
fmt.Printf(`<div class="error">Error: <pre>%s</pre></div>`, im.Error)
}
if len(im.Subcalls) > 0 {
fmt.Println("<div>Subcalls:</div>")
if err := printInternalExecutionsHtml(im.Subcalls, getCode); err != nil {
return err return err
} }
} t, err = t.New("message").Parse(compStateMsg)
fmt.Println("</div>") if err != nil {
return err
} }
return nil return t.ExecuteTemplate(os.Stdout, "compute_state", &compStateHTMLIn{
TipSet: ts,
Comp: o,
})
}
type callMeta struct {
types.ExecutionTrace
Subcall bool
Hash string
}
func call(e types.ExecutionTrace, subcall bool, hash string) callMeta {
return callMeta{
ExecutionTrace: e,
Subcall: subcall,
Hash: hash,
}
}
func codeStr(c cid.Cid) string {
cmh, err := multihash.Decode(c.Hash())
if err != nil {
panic(err)
}
return string(cmh.Digest)
}
func getMethod(code cid.Cid, method abi.MethodNum) string {
return methods[code][method].Name
}
func toFil(f types.BigInt) types.FIL {
return types.FIL(f)
}
func isSlow(t time.Duration) bool {
return t > 10*time.Millisecond
}
func isVerySlow(t time.Duration) bool {
return t > 50*time.Millisecond
}
func sumGas(changes []*types.GasTrace) types.GasTrace {
var out types.GasTrace
for _, gc := range changes {
out.TotalGas += gc.TotalGas
out.ComputeGas += gc.ComputeGas
out.StorageGas += gc.StorageGas
out.TotalVirtualGas += gc.TotalVirtualGas
out.VirtualComputeGas += gc.VirtualComputeGas
out.VirtualStorageGas += gc.VirtualStorageGas
}
return out
} }
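The replacement of hand-built fmt.Printf HTML with html/template above leans on two mechanics: helper functions registered through Funcs, and a nested "message" template that invokes itself for subcalls. A self-contained stdlib sketch of those mechanics with simplified stand-in types (not lotus types):

package main

import (
    "html/template"
    "os"
)

type call struct {
    From, To string
    Subcalls []call
}

type page struct {
    TipsetKey string
    Trace     []call
}

const pageTmpl = `<html><body>
<div>Tipset: <b>{{Short .TipsetKey}}</b></div>
{{range .Trace}}{{template "message" .}}{{end}}
</body></html>
`

const msgTmpl = `<div class="exec"><b>{{.From}}</b> -&gt; <b>{{.To}}</b>
{{if gt (len .Subcalls) 0}}<div>Subcalls:</div>{{range .Subcalls}}{{template "message" .}}{{end}}{{end}}
</div>
`

func main() {
    t := template.Must(template.New("page").Funcs(template.FuncMap{
        // Stand-in for helpers like CodeStr/GetMethod/ToFil in the real FuncMap.
        "Short": func(s string) string {
            if len(s) > 12 {
                return s[:12] + "..."
            }
            return s
        },
    }).Parse(pageTmpl))
    // "message" is parsed into the same template set so it can call itself recursively.
    template.Must(t.New("message").Parse(msgTmpl))

    data := page{
        TipsetKey: "bafy2bzaceexampletipsetkey",
        Trace:     []call{{From: "t0100", To: "t01000", Subcalls: []call{{From: "t01000", To: "t099"}}}},
    }
    if err := t.ExecuteTemplate(os.Stdout, "page", data); err != nil {
        panic(err)
    }
}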
func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) { func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
@ -1155,7 +1271,7 @@ var stateWaitMsgCmd = &cli.Command{
return err return err
} }
mw, err := api.StateWaitMsg(ctx, msg) mw, err := api.StateWaitMsg(ctx, msg, build.MessageConfidence)
if err != nil { if err != nil {
return err return err
} }

View File

@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
"gopkg.in/urfave/cli.v2" "github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
@ -186,7 +186,7 @@ func SyncWait(ctx context.Context, napi api.FullNode) error {
fmt.Printf("\r\x1b[2KWorker %d: Target: %s\tState: %s\tHeight: %d", working, target, chain.SyncStageString(ss.Stage), ss.Height) fmt.Printf("\r\x1b[2KWorker %d: Target: %s\tState: %s\tHeight: %d", working, target, chain.SyncStageString(ss.Stage), ss.Height)
if time.Now().Unix()-int64(head.MinTimestamp()) < build.BlockDelay { if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
fmt.Println("\nDone!") fmt.Println("\nDone!")
return nil return nil
} }
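A tiny sketch of the sync check after the rename above; BlockDelaySecs is unsigned, hence the explicit int64 cast, and the 25-second value here is only an assumed stand-in for build.BlockDelaySecs:

package main

import (
    "fmt"
    "time"
)

const blockDelaySecs uint64 = 25 // assumed stand-in for build.BlockDelaySecs

// synced reports whether the head tipset's timestamp is within one block delay of now.
func synced(headMinTimestamp uint64) bool {
    return time.Now().Unix()-int64(headMinTimestamp) < int64(blockDelaySecs)
}

func main() {
    fmt.Println(synced(uint64(time.Now().Unix())))
}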

Some files were not shown because too many files have changed in this diff.