Merge branch 'master' into inmem-journal

This commit is contained in:
Raúl Kripalani 2020-08-26 16:38:23 +01:00
commit efdfd3ee3e
343 changed files with 15949 additions and 6148 deletions

View File

@ -252,6 +252,27 @@ jobs:
- run:
command: "! go fmt ./... 2>&1 | read"
cbor-gen-check:
executor: golang
steps:
- install-deps
- prepare
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: go generate ./...
- run: git --no-pager diff
- run: git --no-pager diff --quiet
docs-check:
executor: golang
steps:
- install-deps
- prepare
- run: make docsgen
- run: git --no-pager diff
- run: git --no-pager diff --quiet
lint: &lint
description: |
Run golangci-lint.
@ -288,9 +309,6 @@ jobs:
command: |
$HOME/.local/bin/golangci-lint run -v --timeout 2m \
--concurrency << parameters.concurrency >> << parameters.args >>
lint-changes:
<<: *lint
lint-all:
<<: *lint
@ -319,10 +337,11 @@ workflows:
version: 2.1
ci:
jobs:
- lint-changes: - lint-all
args: "--new-from-rev origin/next"
- mod-tidy-check
- gofmt
- cbor-gen-check
- docs-check
- test:
codecov-upload: true
test-suite-name: full

View File

@ -17,18 +17,27 @@ A brief description of the problem you encountered while proving (sealing) a sec
Including what commands you ran, and a description of your setup, is very helpful.
**Sectors list**
The output of `./lotus-miner sectors list`.
**Sectors status**
The output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).
**Lotus miner logs**
Please go through the logs of your miner, and include screenshots of any error-like messages you find.
Alternatively, please upload the full log files and share a link here.
**Lotus miner diagnostic info**
Please collect the following diagnostic information, and share a link here:
* lotus-miner diagnostic info `lotus-miner info all > allinfo`
**Code modifications**
If you have modified parts of lotus, please describe which areas were modified,
and the scope of those modifications.
**Version**
The output of `lotus --version`.

View File

@ -23,6 +23,14 @@ issues:
- "Potential file inclusion via variable" - "Potential file inclusion via variable"
- "should have( a package)? comment" - "should have( a package)? comment"
- "Error return value of `logging.SetLogLevel` is not checked" - "Error return value of `logging.SetLogLevel` is not checked"
- "comment on exported"
- "(func|method) \\w+ should be \\w+"
- "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
- "(G306|G301|G307|G108|G302|G204|G104)"
- "don't use ALL_CAPS in Go names"
- "string .* has .* occurrences, make it a constant"
- "a blank import should be only in a main or test package, or have a comment justifying it"
- "package comment should be of the form"
exclude-use-default: false
exclude-rules:
@ -46,6 +54,19 @@ issues:
linters:
- gosec
- path: chain/vectors/gen/.*
linters:
- gosec
- path: cmd/lotus-bench/.*
linters:
- gosec
- path: api/test/.*
text: "context.Context should be the first parameter"
linters:
- golint
linters-settings:
goconst:
min-occurrences: 6

View File

@ -1,7 +1,189 @@
# Lotus changelog
# 0.5.4
A patch release, containing a few nice bugfixes and improvements:
- Fix parsing of peer ID in `lotus-miner actor set-peer-id` (@whyrusleeping)
- Update dependencies, fixing several bugs (@Stebalien)
- Fix remaining linter warnings (@Stebalien)
- Use safe string truncation (@Ingar)
- Allow tweaking of blocksync message window size (@whyrusleeping)
- Add some additional gas stats to metrics (@Kubuxu)
- Fix an edge case bug in message selection, add many tests (@vyzo)
# 0.5.3
Yet another hotfix release.
A lesson for readers: having people who have been awake for 12+ hours review
your hotfix PR is not a good idea. Find someone who has slept recently enough
to give you a good code review; otherwise you'll end up quickly bumping
versions again.
- Fixed a bug in the mempool that was introduced in v0.5.2
# 0.5.2 / 2020-08-24
This is a hotfix release.
- Fix message selection to not include messages that are invalid for block
inclusion.
- Improve SelectMessage handling of the case where the message pool's tipset
differs from our mining base.
# 0.5.1 / 2020-08-24
The Space Race release!
This release contains the genesis car file and bootstrap peers for the space
race network.
Additionally, we included two small fixes to genesis creation:
- Randomize ticket value in genesis generation
- Correctly set t099 (burnt funds actor) to have valid account actor state
# 0.5.0 / 2020-08-20
This version of Lotus will be used for the incentivized testnet Space Race competition,
and can be considered mainnet-ready code. It includes some protocol
changes, upgrades of core dependencies, and various bugfixes and UX/performance improvements.
## Highlights
Among the highlights included in this release are:
- Gas changes: We implemented EIP-1559 and introduced real gas values.
- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals,
and the packing of multiple deals into a single sector.
- Renamed features: We renamed some of the binaries, environment variables, and default
paths associated with a Lotus node.
### Gas changes
We made some significant changes to the mechanics of gas in this release.
#### Network fee
We implemented something similar to
[Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md).
The `Message` structure had three changes:
- The `GasPrice` field has been removed
- A new `GasFeeCap` field has been added, which controls the maximum cost
the sender incurs for the message
- A new `GasPremium` field has been added, which controls the reward a miner
earns for including the message
A sender will never be charged more than `GasFeeCap * GasLimit`.
A miner will typically earn `GasPremium * GasLimit` as a reward.
The `BlockHeader` structure has one new field, called `ParentBaseFee`.
Informally speaking, the `ParentBaseFee` is increased when blocks are densely
packed with messages, and decreased otherwise.
The `ParentBaseFee` is used when calculating how much a sender burns when executing a message. _Burning_ simply refers to sending attoFIL to a dedicated, unreachable account.
A message causes `ParentBaseFee * GasUsed` attoFIL to be burnt.
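To make the arithmetic above concrete, here is a minimal sketch in plain Go (hypothetical numbers, not code from this commit) that applies only the relationships stated in this section: the burn is `ParentBaseFee * GasUsed`, the miner reward is typically `GasPremium * GasLimit`, and the sender's total charge is capped at `GasFeeCap * GasLimit`.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical values, chosen only to illustrate the relationships above.
	parentBaseFee := big.NewInt(100) // attoFIL per unit of gas
	gasFeeCap := big.NewInt(150)     // sender's max attoFIL per unit of gas
	gasPremium := big.NewInt(5)      // attoFIL per unit of gas offered to the miner
	gasLimit := big.NewInt(10000000) // gas units the message is allowed to use
	gasUsed := big.NewInt(7500000)   // gas units actually consumed

	// Burn: ParentBaseFee * GasUsed is sent to the unreachable burn account.
	burn := new(big.Int).Mul(parentBaseFee, gasUsed)

	// Miner reward: typically GasPremium * GasLimit.
	reward := new(big.Int).Mul(gasPremium, gasLimit)

	// Upper bound on the sender's charge: GasFeeCap * GasLimit.
	maxCharge := new(big.Int).Mul(gasFeeCap, gasLimit)

	fmt.Println("burnt attoFIL:     ", burn)
	fmt.Println("miner reward:      ", reward)
	fmt.Println("max sender charge: ", maxCharge)
}
```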
#### Real gas values
This release also includes our first "real" gas costs for primitive operations.
The costs were designed to account for both the _time_ that message execution takes,
as well as the _space_ a message adds to the state tree.
### Deal-making changes
There are three key changes to the deal-making process.
#### Committed Capacity sectors
Miners can now pledge "Committed Capacity" (CC) sectors, which are explicitly
stated as containing junk data, and must not include any deals. Miners can do this
to increase their storage power, and win block rewards from this pledged storage.
They can mark these sectors as "upgradable" with `lotus-miner sectors mark-for-upgrade`.
If the miner receives and accepts one or more storage deals, the sector that includes
those deals will _replace_ the CC sector. This is intended to maximize the amount of useful
storage on the Filecoin network.
#### Fast-retrieval deals
Clients can now include a `fast-retrieval` flag when proposing deals with storage miners.
If set to true, the miner will include an extra copy of the deal data. This
data can be quickly served in a retrieval deal, since it will not need to be unsealed.
#### Multiple deals per sector
Miners can now pack multiple deals into a single sector, so long as all the deals
fit into the sector capacity. This should increase the packing efficiency of miners.
### Renamed features
To improve the user experience, we updated several names to maintain
standard prefixing, and to better reflect the meaning of the features being referenced.
In particular, the Lotus miner binary is now called `lotus-miner`, the default
path for miner data is now `~/.lotusminer`, and the environment variable
that sets the path for miner data is now `$LOTUS_MINER_PATH`. A full list of renamed
features can be found [here](https://github.com/filecoin-project/lotus/issues/2304).
## Changelog
#### Downstream upgrades
- Upgrades markets to v0.5.6 (https://github.com/filecoin-project/lotus/pull/3058)
- Upgrades specs-actors to v0.9.3 (https://github.com/filecoin-project/lotus/pull/3151)
#### Core protocol
- Introduces gas values, replacing placeholders (https://github.com/filecoin-project/lotus/pull/2343)
- Implements EIP-1559, introducing a network base fee, message gas fee cap, and message gas fee premium (https://github.com/filecoin-project/lotus/pull/2874)
- Implements Poisson Sortition for elections (https://github.com/filecoin-project/lotus/pull/2084)
#### Deal-making lifecycle
- Introduces "Committed Capacity" sectors (https://github.com/filecoin-project/lotus/pull/2220)
- Introduces "fast-retrieval" flag for deals (https://github.com/filecoin-project/lotus/pull/2323
- Supports packing multiple deals into one sector (https://github.com/filecoin-project/storage-fsm/pull/38)
#### Enhancements
- Optimized message pool selection logic (https://github.com/filecoin-project/lotus/pull/2838)
- Window-based scheduling of sealing tasks (https://github.com/filecoin-project/sector-storage/pull/67)
- Faster window PoSt (https://github.com/filecoin-project/lotus/pull/2209/files)
- Refactors the payment channel manager (https://github.com/filecoin-project/lotus/pull/2640)
- Refactors blocksync (https://github.com/filecoin-project/lotus/pull/2715/files)
#### UX
- Provide status updates for data-transfer (https://github.com/filecoin-project/lotus/pull/3162, https://github.com/filecoin-project/lotus/pull/3191)
- Miners can customise asks (https://github.com/filecoin-project/lotus/pull/2046)
- Miners can toggle auto-acceptance of deals (https://github.com/filecoin-project/lotus/pull/1994)
- Miners can maintain a blocklist of piece CIDs (https://github.com/filecoin-project/lotus/pull/2069)
## Contributors
The following contributors had 10 or more commits go into this release.
We are grateful for every contribution!
| Contributor | Commits | Lines ± |
|--------------------|---------|---------------|
| magik6k | 361 | +13197/-6136 |
| Kubuxu | 227 | +5670/-2587 |
| arajasek | 120 | +2916/-1264 |
| whyrusleeping | 112 | +3979/-1089 |
| vyzo | 99 | +3343/-1305 |
| dirkmc | 68 | +8732/-3621 |
| laser | 45 | +1489/-501 |
| hannahhoward | 43 | +2654/-990 |
| frrist | 37 | +6630/-4338 |
| schomatis | 28 | +3016/-1368 |
| placer14 | 27 | +824/-350 |
| raulk | 25 | +28718/-29849 |
| mrsmkl | 22 | +560/-368 |
| travisperson | 18 | +1354/-314 |
| nonsense | 16 | +2956/-2842 |
| ingar | 13 | +331/-123 |
| daviddias | 11 | +311/-11 |
| Stebalien | 11 | +1204/-980 |
| RobQuistNL | 10 | +69/-74 |
# 0.1.0 / 2019-12-11
We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues).
A huge thank you to all contributors for this testnet release!

View File

@ -148,7 +148,7 @@ BINS+=lotus-fountain
lotus-chainwatch:
rm -f lotus-chainwatch
go build -o lotus-chainwatch ./cmd/lotus-chainwatch go build $(GOFLAGS) -o lotus-chainwatch ./cmd/lotus-chainwatch
.PHONY: lotus-chainwatch
BINS+=lotus-chainwatch
@ -272,11 +272,15 @@ dist-clean:
type-gen:
go run ./gen/main.go
go generate ./...
method-gen:
(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
gen: type-gen method-gen
docsgen:
go run ./api/docgen > documentation/en/api-methods.md
print-%:
@echo $*=$($*)

View File

@ -1,16 +1,24 @@
<p align="center">
<a href="https://lotu.sh/" title="Lotus Docs">
<img src="documentation/images/lotus_logo_h.png" alt="Project Lotus Logo" width="244" />
</a>
</p>
<h1 align="center">Project Lotus - 莲</h1>
<p align="center">
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.7-blue.svg" /></a>
<br>
</p>
Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://spec.filecoin.io).
## Building & Documentation
For instructions on how to build lotus from source, please visit [https://lotu.sh](https://lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
## Reporting a Vulnerability

View File

@ -4,11 +4,10 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
) )
@ -28,6 +27,7 @@ type Common interface {
NetDisconnect(context.Context, peer.ID) error NetDisconnect(context.Context, peer.ID) error
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
NetPubsubScores(context.Context) ([]PubsubScore, error) NetPubsubScores(context.Context) ([]PubsubScore, error)
NetAutoNatStatus(context.Context) (NatInfo, error)
// MethodGroup: Common // MethodGroup: Common
@ -65,3 +65,8 @@ type Version struct {
func (v Version) String() string { func (v Version) String() string {
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
} }
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
}
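As a usage illustration only (not part of this diff), the sketch below shows how a client holding an `api.Common` handle might call the new `NetAutoNatStatus` endpoint; how the handle is obtained (e.g. via the JSON-RPC client) is assumed and elided.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printNatStatus is a hypothetical helper: it assumes the caller already has
// an api.Common (or api.FullNode) handle, e.g. from the JSON-RPC client.
func printNatStatus(ctx context.Context, node api.Common) error {
	nat, err := node.NetAutoNatStatus(ctx)
	if err != nil {
		return err
	}
	// Reachability is the libp2p AutoNAT verdict (Unknown / Public / Private);
	// PublicAddr is only meaningful when the node is publicly reachable.
	fmt.Println("reachability:", nat.Reachability)
	if nat.PublicAddr != "" {
		fmt.Println("public addr: ", nat.PublicAddr)
	}
	return nil
}
```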

View File

@ -21,6 +21,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
) )
@ -39,8 +40,11 @@ type FullNode interface {
// ChainHead returns the current head of the chain. // ChainHead returns the current head of the chain.
ChainHead(context.Context) (*types.TipSet, error) ChainHead(context.Context) (*types.TipSet, error)
// ChainGetRandomness is used to sample the chain for randomness. // ChainGetRandomnessFromTickets is used to sample the chain for randomness.
ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
// ChainGetBlock returns the block specified by the given CID. // ChainGetBlock returns the block specified by the given CID.
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
@ -128,6 +132,9 @@ type FullNode interface {
GasEstimateGasPremium(_ context.Context, nblocksincl uint64, GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
// GasEstimateMessageGas estimates gas values for unset message gas fields
GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error)
// MethodGroup: Sync // MethodGroup: Sync
// The Sync method group contains methods for interacting with and // The Sync method group contains methods for interacting with and
// observing the lotus sync service. // observing the lotus sync service.
@ -159,20 +166,27 @@ type FullNode interface {
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
// MpoolSelect returns a list of pending messages for inclusion in the next block // MpoolSelect returns a list of pending messages for inclusion in the next block
MpoolSelect(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error)
// MpoolPush pushes a signed message to mempool. // MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool. // to mempool.
MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // maxFee is only used when GasFeeCap/GasPremium fields aren't specified
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error)
// MpoolGetNonce gets next nonce for the specified sender. // MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead. // Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error) MpoolGetNonce(context.Context, address.Address) (uint64, error)
MpoolSub(context.Context) (<-chan MpoolUpdate, error) MpoolSub(context.Context) (<-chan MpoolUpdate, error)
// MpoolClear clears pending messages from the mpool
MpoolClear(context.Context, bool) error
// MpoolGetConfig returns (a copy of) the current mpool config // MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config // MpoolSetConfig sets the mpool config to (a copy of) the supplied config
@ -237,14 +251,20 @@ type FullNode interface {
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error)
// ClientRetrieve initiates the retrieval of a file, as specified in the order. // ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
// ClientQueryAsk returns a signed StorageAsk from the specified miner. // ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
// ClientCalcCommP calculates the CommP for a specified file, based on the sector size of the provided miner. // ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*CommPRet, error) ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
// ClientGenCar generates a CAR file for the specified file. // ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error ClientGenCar(ctx context.Context, ref FileRef, outpath string) error
// ClientDealSize calculates real deal data size // ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error)
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
// ClientUnimport removes references to the specified file from filestore // ClientUnimport removes references to the specified file from filestore
//ClientUnimport(path string) //ClientUnimport(path string)
@ -290,11 +310,11 @@ type FullNode interface {
// StateMinerPartitions loads miner partitions for the specified miner/deadline // StateMinerPartitions loads miner partitions for the specified miner/deadline
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error)
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) StateMinerFaults(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner // StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (abi.BitField, error)
// StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector // StateMinerInitialPledgeCollateral returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
@ -311,7 +331,6 @@ type FullNode interface {
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*SectorExpiration, error) StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*SectorExpiration, error)
// StateSectorPartition finds deadline/partition with the specified sector // StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*SectorLocation, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*SectorLocation, error)
StatePledgeCollateral(context.Context, types.TipSetKey) (types.BigInt, error)
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
@ -352,7 +371,7 @@ type FullNode interface {
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
// StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset // StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) StateCirculatingSupply(context.Context, types.TipSetKey) (CirculatingSupply, error)
// MethodGroup: Msig // MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the // The Msig methods are used to interact with multisig wallets on the
@ -462,9 +481,12 @@ type DealInfo struct {
Duration uint64 Duration uint64
DealID abi.DealID DealID abi.DealID
CreationTime time.Time
} }
type MsgLookup struct { type MsgLookup struct {
Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
Receipt types.MessageReceipt Receipt types.MessageReceipt
ReturnDec interface{} ReturnDec interface{}
TipSet types.TipSetKey TipSet types.TipSetKey
@ -507,14 +529,14 @@ type PaychStatus struct {
} }
type ChannelInfo struct { type ChannelInfo struct {
Channel address.Address Channel address.Address
ChannelMessage cid.Cid WaitSentinel cid.Cid
} }
type PaymentInfo struct { type PaymentInfo struct {
Channel address.Address Channel address.Address
ChannelMessage *cid.Cid WaitSentinel cid.Cid
Vouchers []*paych.SignedVoucher Vouchers []*paych.SignedVoucher
} }
type VoucherSpec struct { type VoucherSpec struct {
@ -601,14 +623,15 @@ type MethodCall struct {
} }
type StartDealParams struct { type StartDealParams struct {
Data *storagemarket.DataRef Data *storagemarket.DataRef
Wallet address.Address Wallet address.Address
Miner address.Address Miner address.Address
EpochPrice types.BigInt EpochPrice types.BigInt
MinBlocksDuration uint64 MinBlocksDuration uint64
DealStartEpoch abi.ChainEpoch ProviderCollateral big.Int
FastRetrieval bool DealStartEpoch abi.ChainEpoch
VerifiedDeal bool FastRetrieval bool
VerifiedDeal bool
} }
type IpldObject struct { type IpldObject struct {
@ -665,6 +688,14 @@ type DealCollateralBounds struct {
Max abi.TokenAmount Max abi.TokenAmount
} }
type CirculatingSupply struct {
FilVested abi.TokenAmount
FilMined abi.TokenAmount
FilBurnt abi.TokenAmount
FilLocked abi.TokenAmount
FilCirculating abi.TokenAmount
}
type MiningBaseInfo struct { type MiningBaseInfo struct {
MinerPower types.BigInt MinerPower types.BigInt
NetworkPower types.BigInt NetworkPower types.BigInt
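This hunk changes several `FullNode` signatures at once: `MpoolSelect` gains a ticket-quality `float64`, `MpoolPushMessage` gains a `*MessageSendSpec`, and `GasEstimateMessageGas` is new. The following sketch shows how a caller might combine the latter two; it is an illustration under the assumptions noted in its comments (in particular, that a nil spec falls back to the node's default fee handling), not code from this commit.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// sendFunds is a hypothetical helper; obtaining `node` (an api.FullNode
// JSON-RPC client) and the addresses is assumed and not shown here.
func sendFunds(ctx context.Context, node api.FullNode, from, to address.Address, amt abi.TokenAmount) error {
	msg := &types.Message{
		From:  from,
		To:    to,
		Value: amt,
		// GasLimit, GasFeeCap and GasPremium are deliberately left unset.
	}

	// Ask the node to fill in the unset gas fields for the current chain state.
	estimated, err := node.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK)
	if err != nil {
		return err
	}

	// Sign and push in one call; the nil *api.MessageSendSpec is assumed to
	// mirror the "guess an appropriate fee" behaviour described for a zero maxFee.
	smsg, err := node.MpoolPushMessage(ctx, estimated, nil)
	if err != nil {
		return err
	}

	fmt.Println("pushed message:", smsg.Cid())
	return nil
}
```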

View File

@ -12,9 +12,9 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
) )
@ -79,6 +79,8 @@ type StorageMiner interface {
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error)
@ -118,15 +120,16 @@ type SectorLog struct {
} }
type SectorInfo struct { type SectorInfo struct {
SectorID abi.SectorNumber SectorID abi.SectorNumber
State SectorState State SectorState
CommD *cid.Cid CommD *cid.Cid
CommR *cid.Cid CommR *cid.Cid
Proof []byte Proof []byte
Deals []abi.DealID Deals []abi.DealID
Ticket SealTicket Ticket SealTicket
Seed SealSeed Seed SealSeed
Retries uint64 Retries uint64
ToUpgrade bool
LastErr string LastErr string

View File

@ -6,9 +6,9 @@ import (
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
@ -23,6 +23,8 @@ type WorkerAPI interface {
Paths(context.Context) ([]stores.StoragePath, error) Paths(context.Context) ([]stores.StoragePath, error)
Info(context.Context) (storiface.WorkerInfo, error) Info(context.Context) (storiface.WorkerInfo, error)
AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error)
storage.Sealer storage.Sealer
MoveStorage(ctx context.Context, sector abi.SectorID) error MoveStorage(ctx context.Context, sector abi.SectorID) error

View File

@ -15,10 +15,11 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore" "github.com/filecoin-project/go-multistore"
"github.com/filecoin-project/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/miner"
@ -48,6 +49,7 @@ type CommonStruct struct {
NetDisconnect func(context.Context, peer.ID) error `perm:"write"` NetDisconnect func(context.Context, peer.ID) error `perm:"write"`
NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"` NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"` NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"`
NetAutoNatStatus func(context.Context) (api.NatInfo, error) `perm:"read"`
ID func(context.Context) (peer.ID, error) `perm:"read"` ID func(context.Context) (peer.ID, error) `perm:"read"`
Version func(context.Context) (api.Version, error) `perm:"read"` Version func(context.Context) (api.Version, error) `perm:"read"`
@ -65,31 +67,33 @@ type FullNodeStruct struct {
CommonStruct CommonStruct
Internal struct { Internal struct {
ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"` ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"` ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"`
ChainGetRandomness func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` ChainGetRandomnessFromTickets func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"`
ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"` ChainGetRandomnessFromBeacon func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"`
ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"` ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"`
ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"` ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"`
ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"` ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"` ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"`
ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"` ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"` ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"`
ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"` ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"`
ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"` ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"`
ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"`
ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"` ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"`
ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"` ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"`
ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"` ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"` ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
ChainExport func(context.Context, types.TipSetKey) (<-chan []byte, error) `perm:"read"` ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
ChainExport func(context.Context, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"` GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"`
GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
GasEstimateMessageGas func(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) `perm:"read"`
SyncState func(context.Context) (*api.SyncState, error) `perm:"read"` SyncState func(context.Context) (*api.SyncState, error) `perm:"read"`
SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"`
@ -97,14 +101,18 @@ type FullNodeStruct struct {
SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"` SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"` MpoolGetConfig func(context.Context) (*types.MpoolConfig, error) `perm:"read"`
MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"` MpoolSetConfig func(context.Context, *types.MpoolConfig) error `perm:"write"`
MpoolSelect func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"`
MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
MpoolPushMessage func(context.Context, *types.Message) (*types.SignedMessage, error) `perm:"sign"` MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` MpoolClear func(context.Context, bool) error `perm:"write"`
MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"`
MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
@ -122,20 +130,23 @@ type FullNodeStruct struct {
WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
WalletDelete func(context.Context, address.Address) error `perm:"write"` WalletDelete func(context.Context, address.Address) error `perm:"write"`
ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"` ClientImport func(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"` ClientRemoveImport func(ctx context.Context, importID multistore.StoreID) error `perm:"admin"`
ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"` ClientFindData func(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"` ClientMinerQueryOffer func(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) `perm:"read"`
ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"` ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"` ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
ClientCalcCommP func(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) `perm:"read"` ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"` ClientCalcCommP func(ctx context.Context, inpath string) (*api.CommPRet, error) `perm:"read"`
ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"` ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
ClientDealSize func(ctx context.Context, root cid.Cid) (api.DataSize, error) `perm:"read"`
ClientListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
ClientDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"` StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"` StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
@ -145,9 +156,9 @@ type FullNodeStruct struct {
StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"` StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"` StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) ([]*miner.Deadline, error) `perm:"read"`
StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"` StateMinerPartitions func(context.Context, address.Address, uint64, types.TipSetKey) ([]*miner.Partition, error) `perm:"read"`
StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"` StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"` StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"` StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (abi.BitField, error) `perm:"read"`
StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` StateMinerPreCommitDepositForPower func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"` StateMinerInitialPledgeCollateral func(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
@ -159,7 +170,6 @@ type FullNodeStruct struct {
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"` StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"` StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"` StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
StatePledgeCollateral func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
@ -177,7 +187,7 @@ type FullNodeStruct struct {
StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*verifreg.DataCap, error) `perm:"read"` StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*verifreg.DataCap, error) `perm:"read"`
StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"` StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
StateCirculatingSupply func(context.Context, types.TipSetKey) (abi.TokenAmount, error) `perm:"read"` StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` MsigCreate func(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
@ -230,6 +240,8 @@ type StorageMinerStruct struct {
MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
MarketSetRetrievalAsk func(ctx context.Context, rask *retrievalmarket.Ask) error `perm:"admin"` MarketSetRetrievalAsk func(ctx context.Context, rask *retrievalmarket.Ask) error `perm:"admin"`
MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"` MarketGetRetrievalAsk func(ctx context.Context) (*retrievalmarket.Ask, error) `perm:"read"`
MarketListDataTransfers func(ctx context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
MarketDataTransferUpdates func(ctx context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
PledgeSector func(context.Context) error `perm:"write"` PledgeSector func(context.Context) error `perm:"write"`
@ -296,6 +308,7 @@ type WorkerStruct struct {
Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
AddPiece func(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) `perm:"admin"`
SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"` SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"` SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"` SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
@ -327,6 +340,7 @@ func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]
func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) { func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) {
return c.Internal.NetPubsubScores(ctx) return c.Internal.NetPubsubScores(ctx)
} }
func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return c.Internal.NetConnectedness(ctx, pid) return c.Internal.NetConnectedness(ctx, pid)
} }
@ -351,6 +365,10 @@ func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInf
return c.Internal.NetFindPeer(ctx, p) return c.Internal.NetFindPeer(ctx, p)
} }
func (c *CommonStruct) NetAutoNatStatus(ctx context.Context) (api.NatInfo, error) {
return c.Internal.NetAutoNatStatus(ctx)
}
// ID implements API.ID // ID implements API.ID
func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) { func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) {
return c.Internal.ID(ctx) return c.Internal.ID(ctx)
@ -419,11 +437,15 @@ func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.Retrieval
return c.Internal.ClientRetrieve(ctx, order, ref) return c.Internal.ClientRetrieve(ctx, order, ref)
} }
func (c *FullNodeStruct) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
return c.Internal.ClientRetrieveWithEvents(ctx, order, ref)
}
func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) { func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
return c.Internal.ClientQueryAsk(ctx, p, miner) return c.Internal.ClientQueryAsk(ctx, p, miner)
} }
func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) { func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) {
return c.Internal.ClientCalcCommP(ctx, inpath, miner) return c.Internal.ClientCalcCommP(ctx, inpath)
} }
func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error { func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error {
@ -434,17 +456,27 @@ func (c *FullNodeStruct) ClientDealSize(ctx context.Context, root cid.Cid) (api.
return c.Internal.ClientDealSize(ctx, root) return c.Internal.ClientDealSize(ctx, root)
} }
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, func (c *FullNodeStruct) ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) { return c.Internal.ClientListDataTransfers(ctx)
}
func (c *FullNodeStruct) ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
return c.Internal.ClientDataTransferUpdates(ctx)
}
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk) return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
} }
func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message,
maxqueueblks int64, tsk types.TipSetKey) (types.BigInt, error) { func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxqueueblks int64, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk) return c.Internal.GasEstimateFeeCap(ctx, msg, maxqueueblks, tsk)
} }
func (c *FullNodeStruct) GasEstimateGasLimit(ctx context.Context, msg *types.Message, func (c *FullNodeStruct) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) {
tsk types.TipSetKey) (int64, error) { return c.Internal.GasEstimateMessageGas(ctx, msg, spec, tsk)
}
func (c *FullNodeStruct) GasEstimateGasLimit(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (int64, error) {
return c.Internal.GasEstimateGasLimit(ctx, msg, tsk) return c.Internal.GasEstimateGasLimit(ctx, msg, tsk)
} }
@ -456,20 +488,24 @@ func (c *FullNodeStruct) MpoolSetConfig(ctx context.Context, cfg *types.MpoolCon
return c.Internal.MpoolSetConfig(ctx, cfg) return c.Internal.MpoolSetConfig(ctx, cfg)
} }
func (c *FullNodeStruct) MpoolSelect(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { func (c *FullNodeStruct) MpoolSelect(ctx context.Context, tsk types.TipSetKey, tq float64) ([]*types.SignedMessage, error) {
return c.Internal.MpoolSelect(ctx, tsk) return c.Internal.MpoolSelect(ctx, tsk, tq)
} }
func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) {
return c.Internal.MpoolPending(ctx, tsk) return c.Internal.MpoolPending(ctx, tsk)
} }
func (c *FullNodeStruct) MpoolClear(ctx context.Context, local bool) error {
return c.Internal.MpoolClear(ctx, local)
}
func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) { func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
return c.Internal.MpoolPush(ctx, smsg) return c.Internal.MpoolPush(ctx, smsg)
} }
func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) { func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
return c.Internal.MpoolPushMessage(ctx, msg) return c.Internal.MpoolPushMessage(ctx, msg, spec)
} }
func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) {
@ -488,8 +524,12 @@ func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) {
return c.Internal.ChainHead(ctx) return c.Internal.ChainHead(ctx)
} }
func (c *FullNodeStruct) ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { func (c *FullNodeStruct) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
return c.Internal.ChainGetRandomness(ctx, tsk, personalization, randEpoch, entropy) return c.Internal.ChainGetRandomnessFromTickets(ctx, tsk, personalization, randEpoch, entropy)
}
func (c *FullNodeStruct) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
return c.Internal.ChainGetRandomnessFromBeacon(ctx, tsk, personalization, randEpoch, entropy)
} }
func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
@ -668,7 +708,7 @@ func (c *FullNodeStruct) StateMinerPartitions(ctx context.Context, m address.Add
return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk) return c.Internal.StateMinerPartitions(ctx, m, dlIdx, tsk)
} }
func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) { func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
return c.Internal.StateMinerFaults(ctx, actor, tsk) return c.Internal.StateMinerFaults(ctx, actor, tsk)
} }
@ -676,7 +716,7 @@ func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.Cha
return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk) return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
} }
func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) { func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (abi.BitField, error) {
return c.Internal.StateMinerRecoveries(ctx, actor, tsk) return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
} }
@ -724,10 +764,6 @@ func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Addres
return c.Internal.StateReadState(ctx, addr, tsk) return c.Internal.StateReadState(ctx, addr, tsk)
} }
func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.StatePledgeCollateral(ctx, tsk)
}
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) { func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
return c.Internal.StateWaitMsg(ctx, msgc, confidence) return c.Internal.StateWaitMsg(ctx, msgc, confidence)
} }
@ -792,7 +828,7 @@ func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context,
return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk) return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
} }
func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { func (c *FullNodeStruct) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) {
return c.Internal.StateCirculatingSupply(ctx, tsk) return c.Internal.StateCirculatingSupply(ctx, tsk)
} }
@ -836,8 +872,8 @@ func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address,
return c.Internal.PaychGet(ctx, from, to, amt) return c.Internal.PaychGet(ctx, from, to, amt)
} }
func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) {
return c.Internal.PaychGetWaitReady(ctx, mcid) return c.Internal.PaychGetWaitReady(ctx, sentinel)
} }
func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) { func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
@ -1052,6 +1088,14 @@ func (c *StorageMinerStruct) MarketGetRetrievalAsk(ctx context.Context) (*retrie
return c.Internal.MarketGetRetrievalAsk(ctx) return c.Internal.MarketGetRetrievalAsk(ctx)
} }
func (c *StorageMinerStruct) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
return c.Internal.MarketListDataTransfers(ctx)
}
func (c *StorageMinerStruct) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
return c.Internal.MarketDataTransferUpdates(ctx)
}
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
return c.Internal.DealsImportData(ctx, dealPropCid, file) return c.Internal.DealsImportData(ctx, dealPropCid, file)
} }
@ -1138,6 +1182,10 @@ func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
return w.Internal.Info(ctx) return w.Internal.Info(ctx)
} }
func (w *WorkerStruct) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
return w.Internal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
}
func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
} }

View File

@ -6,8 +6,8 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/filecoin-project/specs-actors/actors/abi" abi "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/paych" paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors" xerrors "golang.org/x/xerrors"
) )
@ -41,26 +41,20 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err return err
} }
// t.ChannelMessage (cid.Cid) (struct) // t.WaitSentinel (cid.Cid) (struct)
if len("ChannelMessage") > cbg.MaxLength { if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"ChannelMessage\" was too long") return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
} }
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ChannelMessage"))); err != nil { if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
return err return err
} }
if _, err := io.WriteString(w, string("ChannelMessage")); err != nil { if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
return err return err
} }
if t.ChannelMessage == nil { if err := cbg.WriteCidBuf(scratch, w, t.WaitSentinel); err != nil {
if _, err := w.Write(cbg.CborNull); err != nil { return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
return err
}
} else {
if err := cbg.WriteCidBuf(scratch, w, *t.ChannelMessage); err != nil {
return xerrors.Errorf("failed to write cid field t.ChannelMessage: %w", err)
}
} }
// t.Vouchers ([]*paych.SignedVoucher) (slice) // t.Vouchers ([]*paych.SignedVoucher) (slice)
@ -133,29 +127,17 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
} }
} }
// t.ChannelMessage (cid.Cid) (struct) // t.WaitSentinel (cid.Cid) (struct)
case "ChannelMessage": case "WaitSentinel":
{ {
pb, err := br.PeekByte() c, err := cbg.ReadCid(br)
if err != nil { if err != nil {
return err return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
} }
if pb == cbg.CborNull[0] {
var nbuf [1]byte
if _, err := br.Read(nbuf[:]); err != nil {
return err
}
} else {
c, err := cbg.ReadCid(br) t.WaitSentinel = c
if err != nil {
return xerrors.Errorf("failed to read cid field t.ChannelMessage: %w", err)
}
t.ChannelMessage = &c
}
} }
// t.Vouchers ([]*paych.SignedVoucher) (slice) // t.Vouchers ([]*paych.SignedVoucher) (slice)

View File

@ -1,18 +1,23 @@
package client package client
import ( import (
"context"
"net/http" "net/http"
"net/url"
"path"
"time"
"github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/lib/rpcenc"
) )
// NewCommonRPC creates a new http jsonrpc client. // NewCommonRPC creates a new http jsonrpc client.
func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
var res apistruct.CommonStruct var res apistruct.CommonStruct
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{ []interface{}{
&res.Internal, &res.Internal,
}, },
@ -23,9 +28,9 @@ func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.C
} }
// NewFullNodeRPC creates a new http jsonrpc client. // NewFullNodeRPC creates a new http jsonrpc client.
func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
var res apistruct.FullNodeStruct var res apistruct.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{ []interface{}{
&res.CommonStruct.Internal, &res.CommonStruct.Internal,
&res.Internal, &res.Internal,
@ -35,26 +40,44 @@ func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonr
} }
// NewStorageMinerRPC creates a new http jsonrpc client for miner // NewStorageMinerRPC creates a new http jsonrpc client for miner
func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMiner, jsonrpc.ClientCloser, error) { func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
var res apistruct.StorageMinerStruct var res apistruct.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{ []interface{}{
&res.CommonStruct.Internal, &res.CommonStruct.Internal,
&res.Internal, &res.Internal,
}, },
requestHeader, requestHeader,
opts...,
) )
return &res, closer, err return &res, closer, err
} }
func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) { func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
u, err := url.Parse(addr)
if err != nil {
return nil, nil, err
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
}
///rpc/v0 -> /rpc/streams/v0/push
u.Path = path.Join(u.Path, "../streams/v0/push")
var res apistruct.WorkerStruct var res apistruct.WorkerStruct
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{ []interface{}{
&res.Internal, &res.Internal,
}, },
requestHeader, requestHeader,
rpcenc.ReaderParamEncoder(u.String()),
jsonrpc.WithNoReconnect(),
jsonrpc.WithTimeout(30*time.Second),
) )
return &res, closer, err return &res, closer, err
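For clarity, the NewWorkerRPC constructor above rewrites the worker's websocket address into the HTTP push endpoint used by the streaming parameter encoder. A standalone sketch of that rewrite, using a purely illustrative worker address (not taken from this diff):

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Illustrative input; a real worker RPC address comes from configuration.
	u, err := url.Parse("ws://127.0.0.1:3456/rpc/v0")
	if err != nil {
		panic(err)
	}
	switch u.Scheme { // mirror the scheme mapping in NewWorkerRPC
	case "ws":
		u.Scheme = "http"
	case "wss":
		u.Scheme = "https"
	}
	// /rpc/v0 -> /rpc/streams/v0/push
	u.Path = path.Join(u.Path, "../streams/v0/push")
	fmt.Println(u.String()) // prints http://127.0.0.1:3456/rpc/streams/v0/push
}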

View File

@ -12,22 +12,29 @@ import (
"time" "time"
"unicode" "unicode"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-multistore"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct" "github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
"github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/multiformats/go-multiaddr"
) )
var ExampleValues = map[reflect.Type]interface{}{ var ExampleValues = map[reflect.Type]interface{}{
@ -66,11 +73,12 @@ func init() {
ExampleValues[reflect.TypeOf(addr)] = addr ExampleValues[reflect.TypeOf(addr)] = addr
pid, err := peer.IDB58Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf") pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
if err != nil { if err != nil {
panic(err) panic(err)
} }
addExample(pid) addExample(pid)
addExample(&pid)
addExample(bitfield.NewFromSet([]uint64{5})) addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
@ -98,6 +106,12 @@ func init() {
addExample(build.APIVersion) addExample(build.APIVersion)
addExample(api.PCHInbound) addExample(api.PCHInbound)
addExample(time.Minute) addExample(time.Minute)
addExample(datatransfer.TransferID(3))
addExample(datatransfer.Ongoing)
addExample(multistore.StoreID(50))
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(network.ReachabilityPublic)
addExample(&types.ExecutionTrace{ addExample(&types.ExecutionTrace{
Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message), Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
@ -111,6 +125,14 @@ func init() {
addExample(map[string]api.MarketBalance{ addExample(map[string]api.MarketBalance{
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
}) })
addExample(map[string]*pubsub.TopicScoreSnapshot{
"/blocks": {
TimeInMesh: time.Minute,
FirstMessageDeliveries: 122,
MeshMessageDeliveries: 1234,
InvalidMessageDeliveries: 3,
},
})
maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior") maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
if err != nil { if err != nil {

api/test/blockminer.go Normal file
View File

@ -0,0 +1,56 @@
package test
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type BlockMiner struct {
ctx context.Context
t *testing.T
miner TestStorageNode
blocktime time.Duration
mine int64
nulls int64
done chan struct{}
}
func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
return &BlockMiner{
ctx: ctx,
t: t,
miner: miner,
blocktime: blocktime,
mine: int64(1),
done: make(chan struct{}),
}
}
func (bm *BlockMiner) MineBlocks() {
time.Sleep(time.Second)
go func() {
defer close(bm.done)
for atomic.LoadInt64(&bm.mine) == 1 {
time.Sleep(bm.blocktime)
nulls := atomic.SwapInt64(&bm.nulls, 0)
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, error) {},
}); err != nil {
bm.t.Error(err)
}
}
}()
}
func (bm *BlockMiner) Stop() {
atomic.AddInt64(&bm.mine, -1)
fmt.Println("shutting down mining")
<-bm.done
}
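BlockMiner wraps background block production for tests. A minimal sketch of the intended lifecycle, assuming a test that already holds a TestStorageNode (the helper name and blocktime value are illustrative, not part of the diff):

package test

import (
	"context"
	"testing"
	"time"
)

// exampleBlockMinerLifecycle is a hypothetical helper showing how a test
// would drive BlockMiner: construct, start mining, and stop via defer.
func exampleBlockMinerLifecycle(t *testing.T, miner TestStorageNode) {
	ctx := context.Background()
	bm := NewBlockMiner(ctx, t, miner, 50*time.Millisecond) // illustrative blocktime
	bm.MineBlocks() // mines in a background goroutine until Stop is called
	defer bm.Stop() // flips the mine flag and waits for the goroutine to exit
	// ... run assertions here while blocks are being produced ...
}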

View File

@ -20,7 +20,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]

View File

@ -22,8 +22,8 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/miner" "github.com/filecoin-project/lotus/miner"
sealing "github.com/filecoin-project/storage-fsm"
dag "github.com/ipfs/go-merkledag" dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test" dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file" unixfile "github.com/ipfs/go-unixfs/file"
@ -47,7 +47,7 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]
@ -84,7 +84,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]
@ -141,14 +141,14 @@ func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNod
info, err := client.ClientGetDealInfo(ctx, *deal) info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err) require.NoError(t, err)
testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, carExport, data) testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
} }
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]
@ -193,7 +193,7 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati
info, err := client.ClientGetDealInfo(ctx, *deal) info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err) require.NoError(t, err)
testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, false, data) testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
atomic.AddInt64(&mine, -1) atomic.AddInt64(&mine, -1)
fmt.Println("shutting down mining") fmt.Println("shutting down mining")
<-done <-done
@ -203,7 +203,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]
@ -267,7 +267,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
rf, _ := miner.SectorsRefs(ctx) rf, _ := miner.SectorsRefs(ctx)
fmt.Printf("refs: %+v\n", rf) fmt.Printf("refs: %+v\n", rf)
testRetrieval(t, ctx, err, client, fcid2, &info.PieceCID, false, data2) testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
} }
atomic.AddInt64(&mine, -1) atomic.AddInt64(&mine, -1)
@ -373,7 +373,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
} }
} }
func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
offers, err := client.ClientFindData(ctx, fcid, piece) offers, err := client.ClientFindData(ctx, fcid, piece)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -398,9 +398,11 @@ func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.Fu
Path: filepath.Join(rpath, "ret"), Path: filepath.Join(rpath, "ret"),
IsCAR: carExport, IsCAR: carExport,
} }
err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
if err != nil { for update := range updates {
t.Fatalf("%+v", err) if update.Err != "" {
t.Fatalf("%v", err)
}
} }
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret")) rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))

View File

@ -20,11 +20,12 @@ import (
"github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl"
) )
//nolint:deadcode,varcheck
var log = logging.Logger("apitest") var log = logging.Logger("apitest")
func (ts *testSuite) testMining(t *testing.T) { func (ts *testSuite) testMining(t *testing.T) {
ctx := context.Background() ctx := context.Background()
apis, sn := ts.makeNodes(t, 1, oneMiner) apis, sn := ts.makeNodes(t, 1, OneMiner)
api := apis[0] api := apis[0]
newHeads, err := api.ChainNotify(ctx) newHeads, err := api.ChainNotify(ctx)
@ -38,7 +39,7 @@ func (ts *testSuite) testMining(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(2), h1.Height()) require.Equal(t, abi.ChainEpoch(2), h1.Height())
err = sn[0].MineOne(ctx, MineNext) MineUntilBlock(ctx, t, sn[0], nil)
require.NoError(t, err) require.NoError(t, err)
<-newHeads <-newHeads
@ -55,7 +56,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
}() }()
ctx := context.Background() ctx := context.Background()
apis, sn := ts.makeNodes(t, 1, oneMiner) apis, sn := ts.makeNodes(t, 1, OneMiner)
api := apis[0] api := apis[0]
newHeads, err := api.ChainNotify(ctx) newHeads, err := api.ChainNotify(ctx)
@ -69,7 +70,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(2), h1.Height()) require.Equal(t, abi.ChainEpoch(2), h1.Height())
err = sn[0].MineOne(ctx, MineNext) MineUntilBlock(ctx, t, sn[0], nil)
require.NoError(t, err) require.NoError(t, err)
<-newHeads <-newHeads
@ -78,7 +79,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(3), h2.Height()) require.Equal(t, abi.ChainEpoch(3), h2.Height())
err = sn[0].MineOne(ctx, MineNext) MineUntilBlock(ctx, t, sn[0], nil)
require.NoError(t, err) require.NoError(t, err)
<-newHeads <-newHeads

View File

@ -23,14 +23,13 @@ import (
"github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/miner"
) )
func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) { func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
_ = os.Setenv("BELLMAN_NO_GPU", "1") _ = os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 2, oneMiner) n, sn := b(t, 2, OneMiner)
paymentCreator := n[0] paymentCreator := n[0]
paymentReceiver := n[1] paymentReceiver := n[1]
@ -51,8 +50,8 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
} }
// start mining blocks // start mining blocks
bm := newBlockMiner(ctx, t, miner, blocktime) bm := NewBlockMiner(ctx, t, miner, blocktime)
bm.mineBlocks() bm.MineBlocks()
// send some funds to register the receiver // send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1")) receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1"))
@ -60,7 +59,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal(err) t.Fatal(err)
} }
sendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18)) SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// setup the payment channel // setup the payment channel
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx) createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
@ -74,7 +73,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal(err) t.Fatal(err)
} }
channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.ChannelMessage) channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -154,6 +153,9 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
}, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { }, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key()) return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
}) })
if err != nil {
t.Fatal(err)
}
select { select {
case <-finished: case <-finished:
@ -201,10 +203,10 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
} }
// shut down mining // shut down mining
bm.stop() bm.Stop()
} }
func waitForBlocks(ctx context.Context, t *testing.T, bm *blockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) { func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) {
// We need to add null blocks in batches, if we add too many the chain can't sync // We need to add null blocks in batches, if we add too many the chain can't sync
batchSize := 60 batchSize := 60
for i := 0; i < count; i += batchSize { for i := 0; i < count; i += batchSize {
@ -221,7 +223,7 @@ func waitForBlocks(ctx context.Context, t *testing.T, bm *blockMiner, paymentRec
To: builtin.BurntFundsActorAddr, To: builtin.BurntFundsActorAddr,
From: receiverAddr, From: receiverAddr,
Value: types.NewInt(0), Value: types.NewInt(0),
}) }, nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -249,73 +251,3 @@ func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode,
fmt.Println("Confirmed", desc) fmt.Println("Confirmed", desc)
return res return res
} }
type blockMiner struct {
ctx context.Context
t *testing.T
miner TestStorageNode
blocktime time.Duration
mine int64
nulls int64
done chan struct{}
}
func newBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *blockMiner {
return &blockMiner{
ctx: ctx,
t: t,
miner: miner,
blocktime: blocktime,
mine: int64(1),
done: make(chan struct{}),
}
}
func (bm *blockMiner) mineBlocks() {
time.Sleep(time.Second)
go func() {
defer close(bm.done)
for atomic.LoadInt64(&bm.mine) == 1 {
time.Sleep(bm.blocktime)
nulls := atomic.SwapInt64(&bm.nulls, 0)
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, error) {},
}); err != nil {
bm.t.Error(err)
}
}
}()
}
func (bm *blockMiner) stop() {
atomic.AddInt64(&bm.mine, -1)
fmt.Println("shutting down mining")
<-bm.done
}
func sendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: addr,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg)
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
}

View File

@ -4,6 +4,8 @@ import (
"context" "context"
"testing" "testing"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -14,10 +16,16 @@ import (
type TestNode struct { type TestNode struct {
api.FullNode api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
} }
type TestStorageNode struct { type TestStorageNode struct {
api.StorageMiner api.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
MineOne func(context.Context, miner.MineReq) error MineOne func(context.Context, miner.MineReq) error
} }
@ -54,11 +62,11 @@ func TestApis(t *testing.T, b APIBuilder) {
t.Run("testMiningReal", ts.testMiningReal) t.Run("testMiningReal", ts.testMiningReal)
} }
var oneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
func (ts *testSuite) testVersion(t *testing.T) { func (ts *testSuite) testVersion(t *testing.T) {
ctx := context.Background() ctx := context.Background()
apis, _ := ts.makeNodes(t, 1, oneMiner) apis, _ := ts.makeNodes(t, 1, OneMiner)
api := apis[0] api := apis[0]
v, err := api.Version(ctx) v, err := api.Version(ctx)
@ -70,7 +78,7 @@ func (ts *testSuite) testVersion(t *testing.T) {
func (ts *testSuite) testID(t *testing.T) { func (ts *testSuite) testID(t *testing.T) {
ctx := context.Background() ctx := context.Background()
apis, _ := ts.makeNodes(t, 1, oneMiner) apis, _ := ts.makeNodes(t, 1, OneMiner)
api := apis[0] api := apis[0]
id, err := api.ID(ctx) id, err := api.ID(ctx)
@ -82,7 +90,7 @@ func (ts *testSuite) testID(t *testing.T) {
func (ts *testSuite) testConnectTwo(t *testing.T) { func (ts *testSuite) testConnectTwo(t *testing.T) {
ctx := context.Background() ctx := context.Background()
apis, _ := ts.makeNodes(t, 2, oneMiner) apis, _ := ts.makeNodes(t, 2, OneMiner)
p, err := apis[0].NetPeers(ctx) p, err := apis[0].NetPeers(ctx)
if err != nil { if err != nil {

api/test/util.go Normal file
View File

@ -0,0 +1,64 @@
package test
import (
"context"
"testing"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: addr,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
}
func MineUntilBlock(ctx context.Context, t *testing.T, sn TestStorageNode, cb func()) {
for i := 0; i < 1000; i++ {
var success bool
var err error
wait := make(chan struct{})
sn.MineOne(ctx, miner.MineReq{
Done: func(win bool, e error) {
success = win
err = e
wait <- struct{}{}
},
})
<-wait
if err != nil {
t.Fatal(err)
}
if success {
if cb != nil {
cb()
}
return
}
t.Log("did not mine block, trying again", i)
}
t.Fatal("failed to mine 1000 times in a row...")
}
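MineUntilBlock retries MineOne until a block actually wins, which keeps tests moving despite the probabilistic election. A hedged sketch of looping it to advance the chain by a fixed number of won blocks (the wrapper name is hypothetical):

package test

import (
	"context"
	"testing"
)

// mineNBlocks is a hypothetical wrapper around MineUntilBlock that advances
// the chain by n won blocks, logging progress through the callback.
func mineNBlocks(ctx context.Context, t *testing.T, sn TestStorageNode, n int) {
	for i := 0; i < n; i++ {
		MineUntilBlock(ctx, t, sn, func() {
			t.Logf("mined winning block %d of %d", i+1, n)
		})
	}
}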

View File

@ -12,10 +12,10 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/mock"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
sealing "github.com/filecoin-project/storage-fsm"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
@ -24,11 +24,16 @@ import (
"github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl"
) )
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { func init() {
os.Setenv("BELLMAN_NO_GPU", "1") err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]
@ -110,10 +115,8 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
} }
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background() ctx := context.Background()
n, sn := b(t, 1, oneMiner) n, sn := b(t, 1, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0] miner := sn[0]

View File

@ -2,9 +2,14 @@ package api
import ( import (
"encoding/json" "encoding/json"
"fmt"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
@ -45,11 +50,12 @@ type PubsubScore struct {
} }
type MinerInfo struct { type MinerInfo struct {
Owner address.Address // Must be an ID-address. Owner address.Address // Must be an ID-address.
Worker address.Address // Must be an ID-address. Worker address.Address // Must be an ID-address.
NewWorker address.Address // Must be an ID-address. NewWorker address.Address // Must be an ID-address.
ControlAddresses []address.Address // Must be an ID-addresses.
WorkerChangeEpoch abi.ChainEpoch WorkerChangeEpoch abi.ChainEpoch
PeerId peer.ID PeerId *peer.ID
Multiaddrs []abi.Multiaddrs Multiaddrs []abi.Multiaddrs
SealProofType abi.RegisteredSealProof SealProofType abi.RegisteredSealProof
SectorSize abi.SectorSize SectorSize abi.SectorSize
@ -57,12 +63,20 @@ type MinerInfo struct {
} }
func NewApiMinerInfo(info *miner.MinerInfo) MinerInfo { func NewApiMinerInfo(info *miner.MinerInfo) MinerInfo {
var pid *peer.ID
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
pid = &peerID
}
mi := MinerInfo{ mi := MinerInfo{
Owner: info.Owner, Owner: info.Owner,
Worker: info.Worker, Worker: info.Worker,
NewWorker: address.Undef, ControlAddresses: info.ControlAddresses,
WorkerChangeEpoch: -1,
PeerId: peer.ID(info.PeerId), NewWorker: address.Undef,
WorkerChangeEpoch: -1,
PeerId: pid,
Multiaddrs: info.Multiaddrs, Multiaddrs: info.Multiaddrs,
SealProofType: info.SealProofType, SealProofType: info.SealProofType,
SectorSize: info.SectorSize, SectorSize: info.SectorSize,
@ -76,3 +90,63 @@ func NewApiMinerInfo(info *miner.MinerInfo) MinerInfo {
return mi return mi
} }
type MessageSendSpec struct {
MaxFee abi.TokenAmount
}
var DefaultMessageSendSpec = MessageSendSpec{
MaxFee: big.Zero(),
}
func (ms *MessageSendSpec) Get() MessageSendSpec {
if ms == nil {
return DefaultMessageSendSpec
}
return *ms
}
type DataTransferChannel struct {
TransferID datatransfer.TransferID
Status datatransfer.Status
BaseCID cid.Cid
IsInitiator bool
IsSender bool
Voucher string
Message string
OtherPeer peer.ID
Transferred uint64
}
// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel {
channel := DataTransferChannel{
TransferID: channelState.TransferID(),
Status: channelState.Status(),
BaseCID: channelState.BaseCID(),
IsSender: channelState.Sender() == hostID,
Message: channelState.Message(),
}
stringer, ok := channelState.Voucher().(fmt.Stringer)
if ok {
channel.Voucher = stringer.String()
} else {
voucherJSON, err := json.Marshal(channelState.Voucher())
if err != nil {
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
} else {
channel.Voucher = string(voucherJSON)
}
}
if channel.IsSender {
channel.IsInitiator = !channelState.IsPull()
channel.Transferred = channelState.Sent()
channel.OtherPeer = channelState.Recipient()
} else {
channel.IsInitiator = channelState.IsPull()
channel.Transferred = channelState.Received()
channel.OtherPeer = channelState.Sender()
}
return channel
}
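The MessageSendSpec.Get accessor added above is nil-safe, so callers such as MpoolPushMessage can pass a nil spec and fall back to DefaultMessageSendSpec. A short standalone sketch (the fee value is illustrative):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

// maxFeeFor shows the nil-safe fallback: a nil spec yields the default
// (zero) MaxFee, while a non-nil spec yields whatever the caller set.
func maxFeeFor(spec *api.MessageSendSpec) abi.TokenAmount {
	return spec.Get().MaxFee
}

func main() {
	fmt.Println(maxFeeFor(nil))                                                   // 0 (DefaultMessageSendSpec)
	fmt.Println(maxFeeFor(&api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1e9)})) // 1000000000 (illustrative)
}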

View File

@ -1,12 +1,6 @@
/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd /dns4/bootstrap-0.testnet.fildev.network/tcp/1347/p2p/12D3KooWJTUBUjtzWJGWU1XSiY21CwmHaCNLNYn2E7jqHEHyZaP7
/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd /dns4/bootstrap-1.testnet.fildev.network/tcp/1347/p2p/12D3KooW9yeKXha4hdrJKq74zEo99T8DhriQdWNoojWnnQbsgB3v
/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d /dns4/bootstrap-2.testnet.fildev.network/tcp/1347/p2p/12D3KooWCrx8yVG9U9Kf7w8KLN3Edkj5ZKDhgCaeMqQbcQUoB6CT
/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d /dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34
/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK /dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T
/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK /dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W
/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
/ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R

View File

@ -2,7 +2,7 @@ package build
import "github.com/filecoin-project/lotus/node/modules/dtypes" import "github.com/filecoin-project/lotus/node/modules/dtypes"
var DrandNetwork = DrandMainnet var DrandNetwork = DrandIncentinet
func DrandConfig() dtypes.DrandConfig { func DrandConfig() dtypes.DrandConfig {
return DrandConfigs[DrandNetwork] return DrandConfigs[DrandNetwork]
@ -15,6 +15,7 @@ const (
DrandTestnet DrandTestnet
DrandDevnet DrandDevnet
DrandLocalnet DrandLocalnet
DrandIncentinet
) )
var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
@ -55,4 +56,17 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
}, },
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
}, },
DrandIncentinet: {
Servers: []string{
"https://pl-eu.incentinet.drand.sh",
"https://pl-us.incentinet.drand.sh",
"https://pl-sin.incentinet.drand.sh",
},
Relays: []string{
"/dnsaddr/pl-eu.incentinet.drand.sh/",
"/dnsaddr/pl-us.incentinet.drand.sh/",
"/dnsaddr/pl-sin.incentinet.drand.sh/",
},
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
},
} }

Binary file not shown.

View File

@ -89,11 +89,14 @@ const VerifSigCacheSize = 32000
// TODO: If this is gonna stay, it should move to specs-actors // TODO: If this is gonna stay, it should move to specs-actors
const BlockMessageLimit = 10000 const BlockMessageLimit = 10000
const BlockGasLimit = 10_000_000_000 const BlockGasLimit = 10_000_000_000
const BlockGasTarget = BlockGasLimit / 2 const BlockGasTarget = BlockGasLimit / 2
const BaseFeeMaxChangeDenom = 8 // 12.5% const BaseFeeMaxChangeDenom = 8 // 12.5%
const InitialBaseFee = 100e6 const InitialBaseFee = 100e6
const MinimumBaseFee = 100 const MinimumBaseFee = 100
const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5
// Actor consts // Actor consts
// TODO: Pull from actors when its made not private // TODO: Pull from actors when its made not private

View File

@ -66,4 +66,7 @@ var (
// Actor consts // Actor consts
// TODO: Pull from actors when its made not private // TODO: Pull from actors when its made not private
MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay)
PackingEfficiencyNum int64 = 4
PackingEfficiencyDenom int64 = 5
) )

View File

@ -13,7 +13,7 @@ import (
) )
func init() { func init() {
power.ConsensusMinerMinPower = big.NewInt(1024 << 30) power.ConsensusMinerMinPower = big.NewInt(10 << 40)
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1: {}, abi.RegisteredSealProof_StackedDrg32GiBV1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1: {}, abi.RegisteredSealProof_StackedDrg64GiBV1: {},

View File

@ -1,152 +1,152 @@
{ {
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
"cid": "QmeDRyxek34F1H6xJY6AkFdWvPsy5F6dKTrebV3ZtWT4ky", "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
"digest": "f5827f2d8801c62c831e0f972f6dc8bb", "digest": "7610b9f82bfc88405b7a832b651ce2f6",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": {
"cid": "QmUw1ZmG4BBbX19MsbH3zAEGKUc42iFJc5ZAyomDHeJTsA", "cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X",
"digest": "398fecdb4b2de445125852bc3c080b35", "digest": "0e0958009936b9d5e515ec97b8cb792d",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": {
"cid": "QmUeNKp9YZpiAFm81RV5KuxH1FDGJx2DuwcbU2XNSZLLSv", "cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR",
"digest": "2b6d2972ac9e862e8134d98fb695b0c5", "digest": "1a7d4a9c8a502a497ed92a54366af33f",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": {
"cid": "QmQaQmTXX995Akd66ggtJY5bNx6Gkxk8P34JTdMMq8393G", "cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV",
"digest": "3688c9eb256b7b17f411dad78d5ef74a", "digest": "4dae975de4f011f101f5a2f86d1daaba",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": {
"cid": "QmfEYTMSkwGJTumQx26iKXGNKiYh3mmAC4SkdybZpJCj5p", "cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS",
"digest": "09bff16aed893349d94485cfae366a9c", "digest": "82c88066be968bb550a05e30ff6c2413",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": {
"cid": "QmP4ThPieSUJyRanjibWpT5R5cCMzMAU4j8Y7kBn7CSW1Q", "cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU",
"digest": "142f2f7e8f1b1779290315cabfd2c803", "digest": "ffd79788d614d27919ae5bd2d94eacb6",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": {
"cid": "QmcAixrHsz29DgvtZiMc2kQjvPRvWxYUp36QYmRDZbmREm", "cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP",
"digest": "8f987f64d434365562180b96ec12e299", "digest": "700171ecf7334e3199437c930676af82",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": {
"cid": "QmT4iFnbL6r4txS5PXsiV7NTzbhCxHy54PvdkJJGV2VFXb", "cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG",
"digest": "94b6c24ac01924f4feeecedd16b5d77d", "digest": "79ebb55f56fda427743e35053edad8fc",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": {
"cid": "QmbjFst6SFCK1KsTQrfwPdxf3VTNa1raed574tEZZ9PoyQ", "cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx",
"digest": "2c245fe8179839dd6c6cdea207c67ae8", "digest": "c49499bb76a0762884896f9683403f55",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": {
"cid": "QmQJKmvZN1a5cQ1Nw6CDyXs3nuRPzvyU5NvCFMUL2BfcZC", "cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc",
"digest": "56ae47bfda53bb8d22981ed8d8d27d72", "digest": "34d4feeacd9abf788d69ef1bb4d8fd00",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": {
"cid": "QmQCABxeTpdvXTyjDyk7nPBxkQzCh7MXfGztWnSXEPKMLW", "cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT",
"digest": "7e6b2eb5ecbb11ac651ad66ebbb2075a", "digest": "827359440349fe8f5a016e7598993b79",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": {
"cid": "QmPBweyugh5Sx4umk8ULhgEGbjY8xmWLfU6M7EMpc8Mad6", "cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN",
"digest": "94a8d9e25a9ab9674d339833664eba25", "digest": "bd2cd62f65c1ab84f19ca27e97b7c731",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": {
"cid": "QmY5yax1E9KymBnCeHksE9Zi8NieZbmwcpoDGoabkeeb9h", "cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ",
"digest": "c909ea9e3fe25ab9b391a64593afdbba", "digest": "2cf49eb26f1fee94c85781a390ddb4c8",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": {
"cid": "QmXnPo4yH5mwMguwrvqgRfduSttbmPrXtbBfbwU21wQWHt", "cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE",
"digest": "caf900461e988bbf86dbcaca087b7864", "digest": "0f8ec542485568fa3468c066e9fed82b",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": {
"cid": "QmZtzzPWwmZEgR7MSMvXRbt9KVK8k4XZ5RLWHybHJW9SdE", "cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i",
"digest": "a2844f0703f186d143a06146a04577d8", "digest": "d84f79a16fe40e9e25a36e2107bb1ba0",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": {
"cid": "QmWxEA7EdQCUJTzjNpxg5XTF45D2uVyYnN1QRUb5TRYU8M", "cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF",
"digest": "2306247a1e616dbe07f01b88196c2044", "digest": "fc02943678dd119e69e7fab8420e8819",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": {
"cid": "QmP676KwuvyF9Y64uJnXvLtvD1xcuWQ6wD23RzYtQ6dd4f", "cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V",
"digest": "215b1c667a4f46a1d0178338df568615", "digest": "3810b7780ac0e299b22ae70f1f94c9bc",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": {
"cid": "QmPvPwbJtcSGyqB1rQJhSF5yvFbX9ZBSsHVej5F8JUyHUJ", "cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7",
"digest": "0c9c423b28b1455fcbc329a1045fd4dd", "digest": "59d2bf1857adc59a4f08fcf2afaa916b",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": {
"cid": "QmUxPQfvckzm1t6MFRdDZ1fDK5UJzAjK7pTZ97cwyachdr", "cid": "QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz",
"digest": "965132f51ae445b0e6d32692b7561995", "digest": "2170a91ad5bae22ea61f2ea766630322",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": {
"cid": "QmTxq2EBnQWb5R8tS4MHdchj4vNfLYGoSXxwJFvs5xgW4K", "cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm",
"digest": "fc8c3d26e0e56373ad96cb41520d55a6", "digest": "6d3789148fb6466d07ee1e24d6292fd6",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": {
"cid": "QmRjgZHERgqGoRagR788Kh6ybi26csVYa8mqbqhmZm57Jx", "cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h",
"digest": "cfc7b0897d1eee48c586f7beb89e67f7", "digest": "434fb1338ecfaf0f59256f30dde4968f",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": {
"cid": "QmNjvnvFP7KgovHUddULoB19fBHT81iz7NcUbzEHZUUPsm", "cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr",
"digest": "fb59bd061c987eac7068008c44de346b", "digest": "dc1ade9929ade1708238f155343044ac",
"sector_size": 2048 "sector_size": 2048
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": {
"cid": "QmTpRPBA4dt8fgGpcVzi4L1KA1U2eBHCE8WVmS2GUygMvT", "cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC",
"digest": "36d465915b0afbf96bd08e7915e00952", "digest": "6c77597eb91ab936c1cef4cf19eba1b3",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": {
"cid": "QmRzDyVfQCLsxspoVsed5bcQRsG6KiktngJfcNBL3TJPZe", "cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH",
"digest": "99d16df0eb6a7e227a4f4570c4f6b6f1", "digest": "065179da19fbe515507267677f02823e",
"sector_size": 536870912 "sector_size": 536870912
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": {
"cid": "QmV8ZjTSGzDUWmFvsq9NSyPBR7eDDUcvCPNgj2yE7HMAFu", "cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH",
"digest": "34f3ddf1d1c9f41c0cd73b91e8b4bc27", "digest": "09e612e4eeb7a0eb95679a88404f960c",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": {
"cid": "QmTa3VbjTiqJWU6r4WKayaQrUaaBsrpp5UDqYvPDd2C5hs", "cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99",
"digest": "ec62d59651daa5631d3d1e9c782dd940", "digest": "b687beb9adbd9dabe265a7e3620813e4",
"sector_size": 8388608 "sector_size": 8388608
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": {
"cid": "Qmf8ngfArxrv9tFWDqBcNegdBMymvuakwyHKd1pbW3pbsb", "cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ",
"digest": "a16d6f4c6424fb280236739f84b24f97", "digest": "6a388072a518cf46ebd661f5cc46900a",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": {
"cid": "QmfQgVFerArJ6Jupwyc9tKjLD9n1J9ajLHBdpY465tRM7M", "cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb",
"digest": "7a139d82b8a02e35279d657e197f5c1f", "digest": "0c7b4aac1c40fdb7eb82bc355b41addf",
"sector_size": 34359738368 "sector_size": 34359738368
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": {
"cid": "QmfDha8271nXJn14Aq3qQeghjMBWbs6HNSGa6VuzCVk4TW", "cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX",
"digest": "5d3cd3f107a3bea8a96d1189efd2965c", "digest": "1801f8a6e1b00bceb00cc27314bb5ce3",
"sector_size": 68719476736 "sector_size": 68719476736
}, },
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { "v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": {
"cid": "QmRVtTtiFzHJTHurYzaCvetGAchux9cktixT4aGHthN6Zt", "cid": "QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN",
"digest": "62c366405404e60f171e661492740b1c", "digest": "a89884252c04c298d0b3c81bfd884164",
"sector_size": 68719476736 "sector_size": 68719476736
} }
} }
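The manifest above maps each v28 Groth parameter and verifying-key file to the IPFS CID it is fetched from, a digest for integrity checking, and the sector size it serves. A minimal sketch of decoding a manifest of this shape, assuming a local copy named parameters.json (the struct and file name are illustrative, not the actual lotus parameter fetcher):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// paramFile mirrors one entry of the manifest above: where to fetch the file,
// what it should hash to, and which sector size it belongs to.
type paramFile struct {
	CID        string `json:"cid"`
	Digest     string `json:"digest"`
	SectorSize uint64 `json:"sector_size"`
}

func main() {
	// "parameters.json" is an assumed local copy of the manifest, not a path
	// lotus itself uses.
	raw, err := os.ReadFile("parameters.json")
	if err != nil {
		panic(err)
	}
	manifest := map[string]paramFile{}
	if err := json.Unmarshal(raw, &manifest); err != nil {
		panic(err)
	}
	for name, p := range manifest {
		fmt.Printf("%s -> %s (sector size %d)\n", name, p.CID, p.SectorSize)
	}
}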

View File

@ -25,7 +25,7 @@ func buildType() string {
} }
// BuildVersion is the local build version, set by build system // BuildVersion is the local build version, set by build system
const BuildVersion = "0.4.4" const BuildVersion = "0.5.4"
func UserVersion() string { func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit return BuildVersion + buildType() + CurrentCommit
@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
} }
// APIVersion is a semver version of the rpc api exposed // APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 10, 0) var APIVersion Version = newVer(0, 12, 0)
//nolint:varcheck,deadcode //nolint:varcheck,deadcode
const ( const (

View File

@ -171,6 +171,10 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
// TODO handle genesis better // TODO handle genesis better
return nil return nil
} }
if be := db.getCachedValue(curr.Round); be != nil {
// return no error if the value is in the cache already
return nil
}
b := &dchain.Beacon{ b := &dchain.Beacon{
PreviousSig: prev.Data, PreviousSig: prev.Data,
Round: curr.Round, Round: curr.Round,
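The hunk above adds an early return to VerifyEntry: if the round is already in the beacon's cache, the entry was verified before and the signature check is skipped. A minimal sketch of that caching pattern, with the beacon and entry types reduced to the bare minimum (not the real DrandBeacon):

package main

import "fmt"

// entry stands in for a beacon entry; only the fields this sketch needs.
type entry struct {
	Round uint64
	Data  []byte
}

// beacon keeps already-verified rounds so repeat verification is a map lookup.
type beacon struct {
	cache map[uint64][]byte
}

// verify mirrors the getCachedValue check added above: hit the cache first,
// and only run the (expensive) signature check on a miss.
func (b *beacon) verify(curr, prev entry, check func(curr, prev entry) error) error {
	if _, ok := b.cache[curr.Round]; ok {
		return nil // already verified this round
	}
	if err := check(curr, prev); err != nil {
		return err
	}
	b.cache[curr.Round] = curr.Data
	return nil
}

func main() {
	b := &beacon{cache: map[uint64][]byte{}}
	calls := 0
	check := func(curr, prev entry) error { calls++; return nil }
	_ = b.verify(entry{Round: 1}, entry{}, check)
	_ = b.verify(entry{Round: 1}, entry{}, check)
	fmt.Println("signature checks performed:", calls) // prints 1
}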

View File

@ -7,8 +7,8 @@ import (
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
peer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
) )
type blockReceiptTracker struct { type blockReceiptTracker struct {

View File

@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/filecoin-project/lotus/chain/types" types "github.com/filecoin-project/lotus/chain/types"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors" xerrors "golang.org/x/xerrors"
@ -625,16 +625,14 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.Messages = new(CompactedMessages) t.Messages = new(CompactedMessages)
if err := t.Messages.UnmarshalCBOR(br); err != nil { if err := t.Messages.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Messages pointer: %w", err) return xerrors.Errorf("unmarshaling t.Messages pointer: %w", err)

View File

@ -172,7 +172,7 @@ func (client *BlockSync) processResponse(
resLength, req.Length) resLength, req.Length)
} }
if resLength < int(req.Length) && res.Status != Partial { if resLength < int(req.Length) && res.Status != Partial {
return nil, xerrors.Errorf("got less than requested without a proper status: %s", res.Status) return nil, xerrors.Errorf("got less than requested without a proper status: %d", res.Status)
} }
validRes := &validatedResponse{} validRes := &validatedResponse{}
@ -205,7 +205,7 @@ func (client *BlockSync) processResponse(
validRes.messages = make([]*CompactedMessages, resLength) validRes.messages = make([]*CompactedMessages, resLength)
for i := 0; i < resLength; i++ { for i := 0; i < resLength; i++ {
if res.Chain[i].Messages == nil { if res.Chain[i].Messages == nil {
return nil, xerrors.Errorf("no messages included for tipset at height (head - %d): %w", i) return nil, xerrors.Errorf("no messages included for tipset at height (head - %d)", i)
} }
validRes.messages[i] = res.Chain[i].Messages validRes.messages[i] = res.Chain[i].Messages
} }
@ -308,6 +308,12 @@ func (client *BlockSync) GetChainMessages(
length uint64, length uint64,
) ([]*CompactedMessages, error) { ) ([]*CompactedMessages, error) {
ctx, span := trace.StartSpan(ctx, "GetChainMessages") ctx, span := trace.StartSpan(ctx, "GetChainMessages")
if span.IsRecordingEvents() {
span.AddAttributes(
trace.StringAttribute("tipset", fmt.Sprint(head.Cids())),
trace.Int64Attribute("count", int64(length)),
)
}
defer span.End() defer span.End()
req := &Request{ req := &Request{
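The last hunk above only builds the span attributes when the span is actually recording, so formatting the tipset CIDs is not paid for on unsampled traces. A small sketch of the same guard using the go.opencensus.io/trace package (the tipset string and count are placeholders):

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

// getChainMessages shows the guard from the hunk above: only pay for building
// span attributes when this span is actually being recorded/sampled.
func getChainMessages(ctx context.Context, tipset string, count int64) {
	ctx, span := trace.StartSpan(ctx, "GetChainMessages")
	defer span.End()
	if span.IsRecordingEvents() {
		span.AddAttributes(
			trace.StringAttribute("tipset", tipset),
			trace.Int64Attribute("count", count),
		)
	}
	_ = ctx // the real fetch would use ctx here
	fmt.Println("requesting", count, "tipsets from", tipset)
}

func main() {
	getChainMessages(context.Background(), "bafy...placeholder", 10)
}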

View File

@ -1,9 +1,10 @@
package blocksync package blocksync
import ( import (
"time"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"time"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log" logging "github.com/ipfs/go-log"

View File

@ -1,6 +1,8 @@
package state package state
import ( import (
"bytes"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
typegen "github.com/whyrusleeping/cbor-gen" typegen "github.com/whyrusleeping/cbor-gen"
) )
@ -39,8 +41,11 @@ func DiffAdtArray(preArr, curArr *adt.Array, out AdtArrayDiff) error {
return nil return nil
} }
if err := out.Modify(uint64(i), prevVal, curVal); err != nil { // no modification
return err if !bytes.Equal(prevVal.Raw, curVal.Raw) {
if err := out.Modify(uint64(i), prevVal, curVal); err != nil {
return err
}
} }
return curArr.Delete(uint64(i)) return curArr.Delete(uint64(i))
@ -90,8 +95,11 @@ func DiffAdtMap(preMap, curMap *adt.Map, out AdtMapDiff) error {
return nil return nil
} }
if err := out.Modify(key, prevVal, curVal); err != nil { // no modification
return err if !bytes.Equal(prevVal.Raw, curVal.Raw) {
if err := out.Modify(key, prevVal, curVal); err != nil {
return err
}
} }
return curMap.Delete(k) return curMap.Delete(k)
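The changes above make DiffAdtArray and DiffAdtMap compare the raw CBOR of the old and new values and only invoke Modify when the bytes actually differ. A self-contained sketch of that skip-unchanged behaviour over plain byte-slice maps (adt.Array/adt.Map and typegen.Deferred are replaced by ordinary Go maps here):

package main

import (
	"bytes"
	"fmt"
)

// diffMaps compares two versions of a map of encoded values and reports adds,
// removes, and modifications, but, as in the hunks above, entries whose bytes
// are identical never reach the modify callback.
func diffMaps(prev, cur map[string][]byte,
	add func(k string, v []byte),
	remove func(k string, v []byte),
	modify func(k string, from, to []byte),
) {
	for k, pv := range prev {
		cv, ok := cur[k]
		if !ok {
			remove(k, pv)
			continue
		}
		if !bytes.Equal(pv, cv) { // no modification -> no callback
			modify(k, pv, cv)
		}
	}
	for k, cv := range cur {
		if _, ok := prev[k]; !ok {
			add(k, cv)
		}
	}
}

func main() {
	prev := map[string][]byte{"a": {0}, "b": {1}}
	cur := map[string][]byte{"a": {0}, "b": {2}, "c": {3}}
	diffMaps(prev, cur,
		func(k string, v []byte) { fmt.Println("added", k) },
		func(k string, v []byte) { fmt.Println("removed", k) },
		func(k string, from, to []byte) { fmt.Println("modified", k) },
	)
	// "a" is untouched and therefore never reported.
}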

View File

@ -40,7 +40,7 @@ func TestDiffAdtArray(t *testing.T) {
require.NoError(t, arrB.Set(5, runtime.CBORBytes{8})) // add require.NoError(t, arrB.Set(5, runtime.CBORBytes{8})) // add
require.NoError(t, arrB.Set(6, runtime.CBORBytes{9})) // add require.NoError(t, arrB.Set(6, runtime.CBORBytes{9})) // add
changes := new(TestAdtDiff) changes := new(TestDiffArray)
assert.NoError(t, DiffAdtArray(arrA, arrB, changes)) assert.NoError(t, DiffAdtArray(arrA, arrB, changes))
assert.NotNil(t, changes) assert.NotNil(t, changes)
@ -71,38 +71,186 @@ func TestDiffAdtArray(t *testing.T) {
assert.EqualValues(t, []byte{1}, changes.Removed[1].val) assert.EqualValues(t, []byte{1}, changes.Removed[1].val)
} }
type adtDiffResult struct { func TestDiffAdtMap(t *testing.T) {
key uint64 ctxstoreA := newContextStore()
val runtime.CBORBytes ctxstoreB := newContextStore()
mapA := adt.MakeEmptyMap(ctxstoreA)
mapB := adt.MakeEmptyMap(ctxstoreB)
require.NoError(t, mapA.Put(adt.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete
require.NoError(t, mapA.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
require.NoError(t, mapB.Put(adt.UIntKey(1), runtime.CBORBytes([]byte{1})))
require.NoError(t, mapA.Put(adt.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete
require.NoError(t, mapA.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
require.NoError(t, mapB.Put(adt.UIntKey(3), runtime.CBORBytes([]byte{0})))
require.NoError(t, mapA.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
require.NoError(t, mapB.Put(adt.UIntKey(4), runtime.CBORBytes([]byte{6})))
require.NoError(t, mapB.Put(adt.UIntKey(5), runtime.CBORBytes{8})) // add
require.NoError(t, mapB.Put(adt.UIntKey(6), runtime.CBORBytes{9})) // add
changes := new(TestDiffMap)
assert.NoError(t, DiffAdtMap(mapA, mapB, changes))
assert.NotNil(t, changes)
assert.Equal(t, 2, len(changes.Added))
// keys 5 and 6 were added
assert.EqualValues(t, uint64(6), changes.Added[0].key)
assert.EqualValues(t, []byte{9}, changes.Added[0].val)
assert.EqualValues(t, uint64(5), changes.Added[1].key)
assert.EqualValues(t, []byte{8}, changes.Added[1].val)
assert.Equal(t, 2, len(changes.Modified))
// keys 1 and 4 were modified
assert.EqualValues(t, uint64(1), changes.Modified[0].From.key)
assert.EqualValues(t, []byte{0}, changes.Modified[0].From.val)
assert.EqualValues(t, uint64(1), changes.Modified[0].To.key)
assert.EqualValues(t, []byte{1}, changes.Modified[0].To.val)
assert.EqualValues(t, uint64(4), changes.Modified[1].From.key)
assert.EqualValues(t, []byte{0}, changes.Modified[1].From.val)
assert.EqualValues(t, uint64(4), changes.Modified[1].To.key)
assert.EqualValues(t, []byte{6}, changes.Modified[1].To.val)
assert.Equal(t, 2, len(changes.Removed))
// keys 0 and 2 were deleted
assert.EqualValues(t, uint64(0), changes.Removed[0].key)
assert.EqualValues(t, []byte{0}, changes.Removed[0].val)
assert.EqualValues(t, uint64(2), changes.Removed[1].key)
assert.EqualValues(t, []byte{1}, changes.Removed[1].val)
} }
type TestAdtDiff struct { type TestDiffMap struct {
Added []adtDiffResult Added []adtMapDiffResult
Modified []TestAdtDiffModified Modified []TestAdtMapDiffModified
Removed []adtDiffResult Removed []adtMapDiffResult
} }
var _ AdtArrayDiff = &TestAdtDiff{} var _ AdtMapDiff = &TestDiffMap{}
type TestAdtDiffModified struct { func (t *TestDiffMap) AsKey(key string) (adt.Keyer, error) {
From adtDiffResult k, err := adt.ParseUIntKey(key)
To adtDiffResult if err != nil {
return nil, err
}
return adt.UIntKey(k), nil
} }
func (t *TestAdtDiff) Add(key uint64, val *typegen.Deferred) error { func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
v := new(runtime.CBORBytes) v := new(runtime.CBORBytes)
err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil { if err != nil {
return err return err
} }
t.Added = append(t.Added, adtDiffResult{ k, err := adt.ParseUIntKey(key)
if err != nil {
return err
}
t.Added = append(t.Added, adtMapDiffResult{
key: k,
val: *v,
})
return nil
}
func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error {
vFrom := new(runtime.CBORBytes)
err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw))
if err != nil {
return err
}
vTo := new(runtime.CBORBytes)
err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw))
if err != nil {
return err
}
k, err := adt.ParseUIntKey(key)
if err != nil {
return err
}
if !bytes.Equal(*vFrom, *vTo) {
t.Modified = append(t.Modified, TestAdtMapDiffModified{
From: adtMapDiffResult{
key: k,
val: *vFrom,
},
To: adtMapDiffResult{
key: k,
val: *vTo,
},
})
}
return nil
}
func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error {
v := new(runtime.CBORBytes)
err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return err
}
k, err := adt.ParseUIntKey(key)
if err != nil {
return err
}
t.Removed = append(t.Removed, adtMapDiffResult{
key: k,
val: *v,
})
return nil
}
type adtMapDiffResult struct {
key uint64
val runtime.CBORBytes
}
type TestAdtMapDiffModified struct {
From adtMapDiffResult
To adtMapDiffResult
}
type adtArrayDiffResult struct {
key uint64
val runtime.CBORBytes
}
type TestDiffArray struct {
Added []adtArrayDiffResult
Modified []TestAdtArrayDiffModified
Removed []adtArrayDiffResult
}
var _ AdtArrayDiff = &TestDiffArray{}
type TestAdtArrayDiffModified struct {
From adtArrayDiffResult
To adtArrayDiffResult
}
func (t *TestDiffArray) Add(key uint64, val *typegen.Deferred) error {
v := new(runtime.CBORBytes)
err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return err
}
t.Added = append(t.Added, adtArrayDiffResult{
key: key, key: key,
val: *v, val: *v,
}) })
return nil return nil
} }
func (t *TestAdtDiff) Modify(key uint64, from, to *typegen.Deferred) error { func (t *TestDiffArray) Modify(key uint64, from, to *typegen.Deferred) error {
vFrom := new(runtime.CBORBytes) vFrom := new(runtime.CBORBytes)
err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw)) err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw))
if err != nil { if err != nil {
@ -116,12 +264,12 @@ func (t *TestAdtDiff) Modify(key uint64, from, to *typegen.Deferred) error {
} }
if !bytes.Equal(*vFrom, *vTo) { if !bytes.Equal(*vFrom, *vTo) {
t.Modified = append(t.Modified, TestAdtDiffModified{ t.Modified = append(t.Modified, TestAdtArrayDiffModified{
From: adtDiffResult{ From: adtArrayDiffResult{
key: key, key: key,
val: *vFrom, val: *vFrom,
}, },
To: adtDiffResult{ To: adtArrayDiffResult{
key: key, key: key,
val: *vTo, val: *vTo,
}, },
@ -130,13 +278,13 @@ func (t *TestAdtDiff) Modify(key uint64, from, to *typegen.Deferred) error {
return nil return nil
} }
func (t *TestAdtDiff) Remove(key uint64, val *typegen.Deferred) error { func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error {
v := new(runtime.CBORBytes) v := new(runtime.CBORBytes)
err := v.UnmarshalCBOR(bytes.NewReader(val.Raw)) err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil { if err != nil {
return err return err
} }
t.Removed = append(t.Removed, adtDiffResult{ t.Removed = append(t.Removed, adtArrayDiffResult{
key: key, key: key,
val: *v, val: *v,
}) })

View File

@ -5,17 +5,17 @@ import (
"context" "context"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
"github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/paych" "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api/apibstore" "github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -407,6 +407,21 @@ func (sp *StatePredicates) AvailableBalanceChangedForAddresses(getAddrs func() [
type DiffMinerActorStateFunc func(ctx context.Context, oldState *miner.State, newState *miner.State) (changed bool, user UserData, err error) type DiffMinerActorStateFunc func(ctx context.Context, oldState *miner.State, newState *miner.State) (changed bool, user UserData, err error)
func (sp *StatePredicates) OnInitActorChange(diffInitActorState DiffInitActorStateFunc) DiffTipSetKeyFunc {
return sp.OnActorStateChanged(builtin.InitActorAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
var oldState init_.State
if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil {
return false, nil, err
}
var newState init_.State
if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil {
return false, nil, err
}
return diffInitActorState(ctx, &oldState, &newState)
})
}
func (sp *StatePredicates) OnMinerActorChange(minerAddr address.Address, diffMinerActorState DiffMinerActorStateFunc) DiffTipSetKeyFunc { func (sp *StatePredicates) OnMinerActorChange(minerAddr address.Address, diffMinerActorState DiffMinerActorStateFunc) DiffTipSetKeyFunc {
return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) { return sp.OnActorStateChanged(minerAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) {
var oldState miner.State var oldState miner.State
@ -628,3 +643,145 @@ func (sp *StatePredicates) OnToSendAmountChanges() DiffPaymentChannelStateFunc {
}, nil }, nil
} }
} }
type AddressPair struct {
ID address.Address
PK address.Address
}
type InitActorAddressChanges struct {
Added []AddressPair
Modified []AddressChange
Removed []AddressPair
}
type AddressChange struct {
From AddressPair
To AddressPair
}
type DiffInitActorStateFunc func(ctx context.Context, oldState *init_.State, newState *init_.State) (changed bool, user UserData, err error)
func (i *InitActorAddressChanges) AsKey(key string) (adt.Keyer, error) {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return nil, err
}
return adt.AddrKey(addr), nil
}
func (i *InitActorAddressChanges) Add(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Added = append(i.Added, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
func (i *InitActorAddressChanges) Modify(key string, from, to *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
fromID := new(typegen.CborInt)
if err := fromID.UnmarshalCBOR(bytes.NewReader(from.Raw)); err != nil {
return err
}
fromIDAddr, err := address.NewIDAddress(uint64(*fromID))
if err != nil {
return err
}
toID := new(typegen.CborInt)
if err := toID.UnmarshalCBOR(bytes.NewReader(to.Raw)); err != nil {
return err
}
toIDAddr, err := address.NewIDAddress(uint64(*toID))
if err != nil {
return err
}
i.Modified = append(i.Modified, AddressChange{
From: AddressPair{
ID: fromIDAddr,
PK: pkAddr,
},
To: AddressPair{
ID: toIDAddr,
PK: pkAddr,
},
})
return nil
}
func (i *InitActorAddressChanges) Remove(key string, val *typegen.Deferred) error {
pkAddr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
id := new(typegen.CborInt)
if err := id.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return err
}
idAddr, err := address.NewIDAddress(uint64(*id))
if err != nil {
return err
}
i.Removed = append(i.Removed, AddressPair{
ID: idAddr,
PK: pkAddr,
})
return nil
}
func (sp *StatePredicates) OnAddressMapChange() DiffInitActorStateFunc {
return func(ctx context.Context, oldState, newState *init_.State) (changed bool, user UserData, err error) {
ctxStore := &contextStore{
ctx: ctx,
cst: sp.cst,
}
addressChanges := &InitActorAddressChanges{
Added: []AddressPair{},
Modified: []AddressChange{},
Removed: []AddressPair{},
}
if oldState.AddressMap.Equals(newState.AddressMap) {
return false, nil, nil
}
oldAddrs, err := adt.AsMap(ctxStore, oldState.AddressMap)
if err != nil {
return false, nil, err
}
newAddrs, err := adt.AsMap(ctxStore, newState.AddressMap)
if err != nil {
return false, nil, err
}
if err := DiffAdtMap(oldAddrs, newAddrs, addressChanges); err != nil {
return false, nil, err
}
if len(addressChanges.Added)+len(addressChanges.Removed)+len(addressChanges.Modified) == 0 {
return false, nil, nil
}
return true, addressChanges, nil
}
}
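OnAddressMapChange above short-circuits when the old and new AddressMap roots are equal, diffs the two HAMTs otherwise, and reports a change only if at least one of Added, Modified or Removed is non-empty. A minimal sketch of that decision flow, with the roots and diff results reduced to illustrative stand-ins:

package main

import "fmt"

// addressDiff stands in for InitActorAddressChanges; counts replace the slices.
type addressDiff struct {
	Added, Modified, Removed int
}

// addressMapChanged mirrors the decision flow above: equal roots mean no work,
// and a diff that produced nothing is still reported as "not changed".
func addressMapChanged(oldRoot, newRoot string, runDiff func() addressDiff) (bool, *addressDiff) {
	if oldRoot == newRoot {
		return false, nil // identical HAMT roots: nothing could have changed
	}
	d := runDiff()
	if d.Added+d.Modified+d.Removed == 0 {
		return false, nil
	}
	return true, &d
}

func main() {
	changed, d := addressMapChanged("bafyOldRoot", "bafyNewRoot", func() addressDiff {
		return addressDiff{Added: 2} // pretend two new ID/pubkey pairs appeared
	})
	fmt.Println(changed, *d)
}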

View File

@ -114,10 +114,10 @@ func TestMarketPredicates(t *testing.T) {
} }
oldBalances := map[address.Address]balance{ oldBalances := map[address.Address]balance{
tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)}, tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)},
tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
tutils.NewIDAddr(t, 3): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)}, tutils.NewIDAddr(t, 3): {abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)},
tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)}, tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)},
} }
oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances) oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances)
@ -162,10 +162,10 @@ func TestMarketPredicates(t *testing.T) {
// NB: DealProposals cannot be modified, so don't test that case. // NB: DealProposals cannot be modified, so don't test that case.
} }
newBalances := map[address.Address]balance{ newBalances := map[address.Address]balance{
tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(0)}, tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(3000), abi.NewTokenAmount(0)},
tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)}, tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
tutils.NewIDAddr(t, 4): balance{abi.NewTokenAmount(5000), abi.NewTokenAmount(0)}, tutils.NewIDAddr(t, 4): {abi.NewTokenAmount(5000), abi.NewTokenAmount(0)},
tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)}, tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)},
} }
newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances) newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances)
@ -505,6 +505,7 @@ func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, bala
lockedMapRootCid, err := lockedMapRoot.Root() lockedMapRootCid, err := lockedMapRoot.Root()
require.NoError(t, err) require.NoError(t, err)
lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid) lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid)
require.NoError(t, err)
for addr, balance := range balances { for addr, balance := range balances {
err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked)) err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked))
@ -537,26 +538,24 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o
emptyMap, err := adt.MakeEmptyMap(store).Root() emptyMap, err := adt.MakeEmptyMap(store).Root()
require.NoError(t, err) require.NoError(t, err)
emptyDeadline, err := store.Put(context.TODO(), &miner.Deadline{ emptyDeadline, err := store.Put(store.Context(), miner.ConstructDeadline(emptyArrayCid))
Partitions: emptyArrayCid, require.NoError(t, err)
ExpirationsEpochs: emptyArrayCid,
PostSubmissions: abi.NewBitField(), emptyVestingFunds := miner.ConstructVestingFunds()
EarlyTerminations: abi.NewBitField(), emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds)
LiveSectors: 0,
})
require.NoError(t, err) require.NoError(t, err)
emptyDeadlines := miner.ConstructDeadlines(emptyDeadline) emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
emptyDeadlinesCid, err := store.Put(context.Background(), emptyDeadlines) emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines)
require.NoError(t, err) require.NoError(t, err)
minerInfo := emptyMap minerInfo := emptyMap
emptyBitfield := bitfield.NewFromSet(nil) emptyBitfield := bitfield.NewFromSet(nil)
emptyBitfieldCid, err := store.Put(context.Background(), emptyBitfield) emptyBitfieldCid, err := store.Put(store.Context(), emptyBitfield)
require.NoError(t, err) require.NoError(t, err)
state, err := miner.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid) state, err := miner.ConstructState(minerInfo, 123, emptyBitfieldCid, emptyArrayCid, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid)
require.NoError(t, err) require.NoError(t, err)
return state return state

View File

@ -34,17 +34,18 @@ import (
"github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/genesis" "github.com/filecoin-project/lotus/genesis"
"github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/sector-storage/ffiwrapper"
) )
var log = logging.Logger("gen")
const msgsPerBlock = 20 const msgsPerBlock = 20
//nolint:deadcode,varcheck
var log = logging.Logger("gen")
var ValidWpostForTesting = []abi.PoStProof{{ var ValidWpostForTesting = []abi.PoStProof{{
ProofBytes: []byte("valid proof"), ProofBytes: []byte("valid proof"),
}} }}
@ -92,10 +93,8 @@ func (m mybs) Get(c cid.Cid) (block.Block, error) {
return b, nil return b, nil
} }
var rootkey, _ = address.NewIDAddress(80)
var rootkeyMultisig = genesis.MultisigMeta{ var rootkeyMultisig = genesis.MultisigMeta{
Signers: []address.Address{rootkey}, Signers: []address.Address{remAccTestKey},
Threshold: 1, Threshold: 1,
VestingDuration: 0, VestingDuration: 0,
VestingStart: 0, VestingStart: 0,
@ -107,6 +106,18 @@ var DefaultVerifregRootkeyActor = genesis.Actor{
Meta: rootkeyMultisig.ActorMeta(), Meta: rootkeyMultisig.ActorMeta(),
} }
var remAccTestKey, _ = address.NewFromString("t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy")
var remAccMeta = genesis.MultisigMeta{
Signers: []address.Address{remAccTestKey},
Threshold: 1,
}
var DefaultRemainderAccountActor = genesis.Actor{
Type: genesis.TMultisig,
Balance: big.NewInt(0),
Meta: remAccMeta.ActorMeta(),
}
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg2KiBV1: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
@ -210,9 +221,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
*genm1, *genm1,
*genm2, *genm2,
}, },
VerifregRootKey: DefaultVerifregRootkeyActor, VerifregRootKey: DefaultVerifregRootkeyActor,
NetworkName: "", RemainderAccount: DefaultRemainderAccountActor,
Timestamp: uint64(build.Clock.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()), NetworkName: "",
Timestamp: uint64(build.Clock.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()),
} }
genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl) genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl)
@ -272,6 +284,10 @@ func NewGenerator() (*ChainGen, error) {
return NewGeneratorWithSectors(1) return NewGeneratorWithSectors(1)
} }
func (cg *ChainGen) StateManager() *stmgr.StateManager {
return cg.sm
}
func (cg *ChainGen) SetStateManager(sm *stmgr.StateManager) { func (cg *ChainGen) SetStateManager(sm *stmgr.StateManager) {
cg.sm = sm cg.sm = sm
} }
@ -300,7 +316,8 @@ func (cg *ChainGen) GenesisCar() ([]byte, error) {
func CarWalkFunc(nd format.Node) (out []*format.Link, err error) { func CarWalkFunc(nd format.Node) (out []*format.Link, err error) {
for _, link := range nd.Links() { for _, link := range nd.Links() {
if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { pref := link.Cid.Prefix()
if pref.Codec == cid.FilCommitmentSealed || pref.Codec == cid.FilCommitmentUnsealed {
continue continue
} }
out = append(out, link) out = append(out, link)
@ -383,15 +400,32 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve
} }
func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) {
var blks []*types.FullBlock ms, err := cg.GetMessages(cg)
msgs, err := cg.GetMessages(cg)
if err != nil { if err != nil {
return nil, xerrors.Errorf("get random messages: %w", err) return nil, xerrors.Errorf("get random messages: %w", err)
} }
msgs := make([][]*types.SignedMessage, len(miners))
for i := range msgs {
msgs[i] = ms
}
fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs)
if err != nil {
return nil, err
}
return &MinedTipSet{
TipSet: fts,
Messages: ms,
}, nil
}
func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) {
var blks []*types.FullBlock
for round := base.Height() + 1; len(blks) == 0; round++ { for round := base.Height() + 1; len(blks) == 0; round++ {
for _, m := range miners { for mi, m := range miners {
bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
if err != nil { if err != nil {
return nil, xerrors.Errorf("next block proof: %w", err) return nil, xerrors.Errorf("next block proof: %w", err)
@ -404,7 +438,7 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
return nil, err return nil, err
} }
fblk, err := cg.makeBlock(base, m, ticket, et, bvals, round, wpost, msgs) fblk, err := cg.makeBlock(base, m, ticket, et, bvals, round, wpost, msgs[mi])
if err != nil { if err != nil {
return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) return nil, xerrors.Errorf("making a block for next tipset failed: %w", err)
} }
@ -418,12 +452,7 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
} }
} }
fts := store.NewFullTipSet(blks) return store.NewFullTipSet(blks), nil
return &MinedTipSet{
TipSet: fts,
Messages: msgs,
}, nil
} }
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
@ -516,7 +545,8 @@ func (cg *ChainGen) YieldRepo() (repo.Repo, error) {
} }
type MiningCheckAPI interface { type MiningCheckAPI interface {
ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error)
@ -530,13 +560,22 @@ type mca struct {
bcn beacon.RandomBeacon bcn beacon.RandomBeacon
} }
func (mca mca) ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
pts, err := mca.sm.ChainStore().LoadTipSet(tsk) pts, err := mca.sm.ChainStore().LoadTipSet(tsk)
if err != nil { if err != nil {
return nil, xerrors.Errorf("loading tipset key: %w", err) return nil, xerrors.Errorf("loading tipset key: %w", err)
} }
return mca.sm.ChainStore().GetRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
pts, err := mca.sm.ChainStore().LoadTipSet(tsk)
if err != nil {
return nil, xerrors.Errorf("loading tipset key: %w", err)
}
return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
} }
func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {
@ -567,7 +606,7 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
if err := miner.MarshalCBOR(buf); err != nil { if err := miner.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to cbor marshal address: %w") return nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
} }
electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes()) electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
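The hunks above split the single ChainGetRandomness entry point into ChainGetRandomnessFromTickets and ChainGetRandomnessFromBeacon, backed by GetChainRandomness and GetBeaconRandomness respectively. A sketch of what that split looks like from a caller's point of view, with both sources mocked; the interface and names are illustrative, not the lotus API surface:

package main

import "fmt"

// randomness stands in for abi.Randomness.
type randomness []byte

// randSource captures the shape of the split API above: the same parameters,
// but separate entry points for ticket-chain and beacon randomness.
type randSource interface {
	FromTickets(round int64, entropy []byte) (randomness, error)
	FromBeacon(round int64, entropy []byte) (randomness, error)
}

// fakeSource is a stand-in implementation used only to exercise the interface.
type fakeSource struct{}

func (fakeSource) FromTickets(round int64, entropy []byte) (randomness, error) {
	return randomness(fmt.Sprintf("tickets@%d", round)), nil
}

func (fakeSource) FromBeacon(round int64, entropy []byte) (randomness, error) {
	return randomness(fmt.Sprintf("beacon@%d", round)), nil
}

func main() {
	var src randSource = fakeSource{}
	t, _ := src.FromTickets(10, nil)
	b, _ := src.FromBeacon(10, nil)
	fmt.Println(string(t), string(b))
}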

View File

@ -0,0 +1,41 @@
package genesis
import (
"encoding/hex"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
)
const genesisMultihashString = "1220107d821c25dc0735200249df94a8bebc9c8e489744f86a4ca8919e81f19dcd72"
const genesisBlockHex = "a5684461746574696d6573323031372d30352d30352030313a32373a3531674e6574776f726b6846696c65636f696e65546f6b656e6846696c65636f696e6c546f6b656e416d6f756e7473a36b546f74616c537570706c796d322c3030302c3030302c303030664d696e6572736d312c3430302c3030302c3030306c50726f746f636f6c4c616273a36b446576656c6f706d656e746b3330302c3030302c3030306b46756e6472616973696e676b3230302c3030302c3030306a466f756e646174696f6e6b3130302c3030302c303030674d657373616765784854686973206973207468652047656e6573697320426c6f636b206f66207468652046696c65636f696e20446563656e7472616c697a65642053746f72616765204e6574776f726b2e"
var cidBuilder = cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.SHA2_256}
func expectedCid() cid.Cid {
mh, err := multihash.FromHexString(genesisMultihashString)
if err != nil {
panic(err)
}
return cid.NewCidV1(cidBuilder.Codec, mh)
}
func getGenesisBlock() (blocks.Block, error) {
genesisBlockData, err := hex.DecodeString(genesisBlockHex)
if err != nil {
return nil, err
}
genesisCid, err := cidBuilder.Sum(genesisBlockData)
if err != nil {
return nil, err
}
block, err := blocks.NewBlockWithCid(genesisBlockData, genesisCid)
if err != nil {
return nil, err
}
return block, nil
}
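The new file above pins the original Filecoin genesis block by its hex payload and sha2-256 multihash, and rebuilds its CIDv1 (dag-cbor) to sanity-check the two against each other. Here is the same consistency check in isolation, using the go-cid and go-multihash calls already imported above; the payload and multihash passed in main are placeholders rather than the real genesis data:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

// checkPinned rebuilds the CIDv1 (dag-cbor, sha2-256) of a raw payload and
// compares it against a pinned multihash — the same consistency check that
// expectedCid and getGenesisBlock above rely on.
func checkPinned(payloadHex, pinnedMultihashHex string) (bool, error) {
	payload, err := hex.DecodeString(payloadHex)
	if err != nil {
		return false, err
	}
	builder := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.SHA2_256}
	got, err := builder.Sum(payload)
	if err != nil {
		return false, err
	}
	mh, err := multihash.FromHexString(pinnedMultihashHex)
	if err != nil {
		return false, err
	}
	return got.Equals(cid.NewCidV1(cid.DagCBOR, mh)), nil
}

func main() {
	// Placeholder inputs; the real values are genesisBlockHex and
	// genesisMultihashString pinned in the file above.
	ok, err := checkPinned("a0", "1220"+"00000000000000000000000000000000"+
		"00000000000000000000000000000000")
	fmt.Println(ok, err) // false for the placeholders
}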

View File

@ -2,7 +2,9 @@ package genesis
import ( import (
"context" "context"
"crypto/rand"
"encoding/json" "encoding/json"
"fmt"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
@ -117,11 +119,6 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("making new state tree: %w", err) return nil, nil, xerrors.Errorf("making new state tree: %w", err)
} }
emptyobject, err := cst.Put(context.TODO(), []struct{}{})
if err != nil {
return nil, nil, xerrors.Errorf("failed putting empty object: %w", err)
}
// Create system actor // Create system actor
sysact, err := SetupSystemActor(bs) sysact, err := SetupSystemActor(bs)
@ -134,7 +131,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
// Create init actor // Create init actor
initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey) idStart, initact, keyIDs, err := SetupInitActor(bs, template.NetworkName, template.Accounts, template.VerifregRootKey)
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("setup init actor: %w", err) return nil, nil, xerrors.Errorf("setup init actor: %w", err)
} }
@ -190,31 +187,47 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("set market actor: %w", err) return nil, nil, xerrors.Errorf("set market actor: %w", err)
} }
burntRoot, err := cst.Put(ctx, &account.State{
Address: builtin.BurntFundsActorAddr,
})
if err != nil {
return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err)
}
// Setup burnt-funds // Setup burnt-funds
err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{ err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{
Code: builtin.AccountActorCodeID, Code: builtin.AccountActorCodeID,
Balance: types.NewInt(0), Balance: types.NewInt(0),
Head: emptyobject, Head: burntRoot,
}) })
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err) return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err)
} }
// Create accounts // Create accounts
for id, info := range template.Accounts { for _, info := range template.Accounts {
if info.Type != genesis.TAccount && info.Type != genesis.TMultisig {
switch info.Type {
case genesis.TAccount:
if err := createAccountActor(ctx, cst, state, info, keyIDs); err != nil {
return nil, nil, xerrors.Errorf("failed to create account actor: %w", err)
}
case genesis.TMultisig:
ida, err := address.NewIDAddress(uint64(idStart))
if err != nil {
return nil, nil, err
}
idStart++
if err := createMultisigAccount(ctx, bs, cst, state, ida, info, keyIDs); err != nil {
return nil, nil, err
}
default:
return nil, nil, xerrors.New("unsupported account type") return nil, nil, xerrors.New("unsupported account type")
} }
ida, err := address.NewIDAddress(uint64(AccountStart + id))
if err != nil {
return nil, nil, err
}
if err = createAccount(ctx, bs, cst, state, ida, info); err != nil {
return nil, nil, err
}
} }
vregroot, err := address.NewIDAddress(80) vregroot, err := address.NewIDAddress(80)
@ -222,8 +235,8 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, err return nil, nil, err
} }
if err = createAccount(ctx, bs, cst, state, vregroot, template.VerifregRootKey); err != nil { if err = createMultisigAccount(ctx, bs, cst, state, vregroot, template.VerifregRootKey, keyIDs); err != nil {
return nil, nil, err return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
} }
// Setup the first verifier as ID-address 81 // Setup the first verifier as ID-address 81
@ -258,63 +271,132 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
Balance: types.NewInt(0), Balance: types.NewInt(0),
Head: verifierState, Head: verifierState,
}) })
if err != nil { if err != nil {
return nil, nil, xerrors.Errorf("setting account from actmap: %w", err) return nil, nil, xerrors.Errorf("setting account from actmap: %w", err)
} }
totalFilAllocated := big.Zero()
// flush as ForEach works on the HAMT
if _, err := state.Flush(ctx); err != nil {
return nil, nil, err
}
err = state.ForEach(func(addr address.Address, act *types.Actor) error {
totalFilAllocated = big.Add(totalFilAllocated, act.Balance)
return nil
})
if err != nil {
return nil, nil, xerrors.Errorf("summing account balances in state tree: %w", err)
}
totalFil := big.Mul(big.NewInt(int64(build.FilBase)), big.NewInt(int64(build.FilecoinPrecision)))
remainingFil := big.Sub(totalFil, totalFilAllocated)
if remainingFil.Sign() < 0 {
return nil, nil, xerrors.Errorf("somehow overallocated filecoin (allocated = %s)", types.FIL(totalFilAllocated))
}
remAccKey, err := address.NewIDAddress(90)
if err != nil {
return nil, nil, err
}
template.RemainderAccount.Balance = remainingFil
if err := createMultisigAccount(ctx, bs, cst, state, remAccKey, template.RemainderAccount, keyIDs); err != nil {
return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err)
}
return state, keyIDs, nil return state, keyIDs, nil
} }
func createAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor) error { func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address) error {
if info.Type == genesis.TAccount { var ainfo genesis.AccountMeta
var ainfo genesis.AccountMeta if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
if err := json.Unmarshal(info.Meta, &ainfo); err != nil { return xerrors.Errorf("unmarshaling account meta: %w", err)
return xerrors.Errorf("unmarshaling account meta: %w", err) }
} st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner})
st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner}) if err != nil {
if err != nil { return err
return err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.AccountActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
} else if info.Type == genesis.TMultisig {
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
pending, err := adt.MakeEmptyMap(adt.WrapStore(ctx, cst)).Root()
if err != nil {
return xerrors.Errorf("failed to create empty map: %v", err)
}
st, err := cst.Put(ctx, &multisig.State{
Signers: ainfo.Signers,
NumApprovalsThreshold: uint64(ainfo.Threshold),
StartEpoch: abi.ChainEpoch(ainfo.VestingStart),
UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration),
PendingTxns: pending,
InitialBalance: info.Balance,
})
if err != nil {
return err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.MultisigActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
} }
ida, ok := keyIDs[ainfo.Owner]
if !ok {
return fmt.Errorf("no registered ID for account actor: %s", ainfo.Owner)
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.AccountActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
return nil
}
func createMultisigAccount(ctx context.Context, bs bstore.Blockstore, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address) error {
if info.Type != genesis.TMultisig {
return fmt.Errorf("can only call createMultisigAccount with multisig Actor info")
}
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
pending, err := adt.MakeEmptyMap(adt.WrapStore(ctx, cst)).Root()
if err != nil {
return xerrors.Errorf("failed to create empty map: %v", err)
}
var signers []address.Address
for _, e := range ainfo.Signers {
idAddress, ok := keyIDs[e]
if !ok {
return fmt.Errorf("no registered key ID for signer: %s", e)
}
// Check if actor already exists
_, err := state.GetActor(e)
if err == nil {
signers = append(signers, idAddress)
continue
}
st, err := cst.Put(ctx, &account.State{Address: e})
if err != nil {
return err
}
err = state.SetActor(idAddress, &types.Actor{
Code: builtin.AccountActorCodeID,
Balance: types.NewInt(0),
Head: st,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
signers = append(signers, idAddress)
}
st, err := cst.Put(ctx, &multisig.State{
Signers: signers,
NumApprovalsThreshold: uint64(ainfo.Threshold),
StartEpoch: abi.ChainEpoch(ainfo.VestingStart),
UnlockDuration: abi.ChainEpoch(ainfo.VestingDuration),
PendingTxns: pending,
InitialBalance: info.Balance,
})
if err != nil {
return err
}
err = state.SetActor(ida, &types.Actor{
Code: builtin.MultisigActorCodeID,
Balance: info.Balance,
Head: st,
})
if err != nil {
return xerrors.Errorf("setting account from actmap: %w", err)
}
return nil return nil
} }
@ -323,24 +405,25 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot ci
var sum abi.PaddedPieceSize var sum abi.PaddedPieceSize
vmopt := vm.VMOpts{ vmopt := vm.VMOpts{
StateBase: stateroot, StateBase: stateroot,
Epoch: 0, Epoch: 0,
Rand: &fakeRand{}, Rand: &fakeRand{},
Bstore: cs.Blockstore(), Bstore: cs.Blockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()), Syscalls: mkFakedSigSyscalls(cs.VMSys()),
VestedCalc: nil, CircSupplyCalc: nil,
BaseFee: types.NewInt(0), BaseFee: types.NewInt(0),
} }
vm, err := vm.NewVM(&vmopt) vm, err := vm.NewVM(&vmopt)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err)
} }
for _, m := range template.Miners { for mi, m := range template.Miners {
for si, s := range m.Sectors {
if s.Deal.Provider != m.ID {
return cid.Undef, xerrors.Errorf("Sector %d in miner %d in template had mismatch in provider and miner ID: %s != %s", si, mi, s.Deal.Provider, m.ID)
}
// Add the miner to the market actor's balance table
_, err = doExec(ctx, vm, builtin.StorageMarketActorAddr, m.Owner, builtin.MethodsMarket.AddBalance, mustEnc(adt.Empty))
for _, s := range m.Sectors {
amt := s.Deal.PieceSize amt := s.Deal.PieceSize
verifNeeds[keyIDs[s.Deal.Client]] += amt verifNeeds[keyIDs[s.Deal.Client]] += amt
sum += amt sum += amt
@ -430,14 +513,38 @@ func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys vm.SyscallB
log.Infof("Empty Genesis root: %s", emptyroot) log.Infof("Empty Genesis root: %s", emptyroot)
tickBuf := make([]byte, 32)
_, _ = rand.Read(tickBuf)
genesisticket := &types.Ticket{ genesisticket := &types.Ticket{
VRFProof: []byte("vrf proof0000000vrf proof0000000"), VRFProof: tickBuf,
}
filecoinGenesisCid, err := cid.Decode("bafyreiaqpwbbyjo4a42saasj36kkrpv4tsherf2e7bvezkert2a7dhonoi")
if err != nil {
return nil, xerrors.Errorf("failed to decode filecoin genesis block CID: %w", err)
}
if !expectedCid().Equals(filecoinGenesisCid) {
return nil, xerrors.Errorf("expectedCid != filecoinGenesisCid")
}
gblk, err := getGenesisBlock()
if err != nil {
return nil, xerrors.Errorf("failed to construct filecoin genesis block: %w", err)
}
if !filecoinGenesisCid.Equals(gblk.Cid()) {
return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid")
}
if err := bs.Put(gblk); err != nil {
return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err)
} }
b := &types.BlockHeader{ b := &types.BlockHeader{
Miner: builtin.SystemActorAddr, Miner: builtin.SystemActorAddr,
Ticket: genesisticket, Ticket: genesisticket,
Parents: []cid.Cid{}, Parents: []cid.Cid{filecoinGenesisCid},
Height: 0, Height: 0,
ParentWeight: types.NewInt(0), ParentWeight: types.NewInt(0),
ParentStateRoot: stateroot, ParentStateRoot: stateroot,
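The remainder-account hunk above flushes the state tree, sums every actor balance, and assigns whatever is left of the total supply (build.FilBase × build.FilecoinPrecision) to a multisig at ID address 90, erroring out if allocations exceed the cap. A back-of-the-envelope sketch of that arithmetic with math/big; the 2,000,000,000 FIL base and 1e18 attoFIL precision are assumed here for illustration:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Assumed constants: 2,000,000,000 FIL cap and 1e18 attoFIL per FIL.
	filBase := big.NewInt(2_000_000_000)
	precision := new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
	totalFil := new(big.Int).Mul(filBase, precision)

	// Pretend these are the actor balances found while walking the state tree.
	allocated := []*big.Int{
		new(big.Int).Mul(big.NewInt(400_000_000), precision),
		new(big.Int).Mul(big.NewInt(100_000_000), precision),
	}
	totalAllocated := big.NewInt(0)
	for _, b := range allocated {
		totalAllocated.Add(totalAllocated, b)
	}

	// remaining = totalFil - totalAllocated; a negative result would mean the
	// template over-allocated, which the genesis code treats as an error.
	remaining := new(big.Int).Sub(totalFil, totalAllocated)
	if remaining.Sign() < 0 {
		panic("somehow overallocated filecoin")
	}
	fmt.Println("remainder account balance (attoFIL):", remaining)
}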

View File

@ -14,7 +14,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
@ -57,18 +57,18 @@ func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
} }
func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) { func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) {
vc := func(context.Context, abi.ChainEpoch) (abi.TokenAmount, error) { csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) {
return big.Zero(), nil return big.Zero(), nil
} }
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: sroot, StateBase: sroot,
Epoch: 0, Epoch: 0,
Rand: &fakeRand{}, Rand: &fakeRand{},
Bstore: cs.Blockstore(), Bstore: cs.Blockstore(),
Syscalls: mkFakedSigSyscalls(cs.VMSys()), Syscalls: mkFakedSigSyscalls(cs.VMSys()),
VestedCalc: vc, CircSupplyCalc: csc,
BaseFee: types.NewInt(0), BaseFee: types.NewInt(0),
} }
vm, err := vm.NewVM(vmopt) vm, err := vm.NewVM(vmopt)
@ -129,6 +129,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return nil return nil
}) })
if err != nil {
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
}
} }
// Add market funds // Add market funds
@ -137,7 +140,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
params := mustEnc(&minerInfos[i].maddr) params := mustEnc(&minerInfos[i].maddr)
_, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params) _, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) return cid.Undef, xerrors.Errorf("failed to create genesis miner (add balance): %w", err)
} }
} }
@ -149,7 +152,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
ret, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params)) ret, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params))
if err != nil { if err != nil {
return xerrors.Errorf("failed to create genesis miner: %w", err) return xerrors.Errorf("failed to create genesis miner (publish deals): %w", err)
} }
var ids market.PublishStorageDealsReturn var ids market.PublishStorageDealsReturn
if err := ids.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { if err := ids.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
@ -217,9 +220,12 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
} }
err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error { err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error {
st = reward.ConstructState(qaPow) *st = *reward.ConstructState(qaPow)
return nil return nil
}) })
if err != nil {
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
}
} }
for i, m := range miners { for i, m := range miners {
@ -244,7 +250,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// we've added fake power for this sector above, remove it now // we've added fake power for this sector above, remove it now
err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error { err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint
st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize))) st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
return nil return nil
}) })
@ -262,6 +268,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
return cid.Undef, xerrors.Errorf("getting current total power: %w", err) return cid.Undef, xerrors.Errorf("getting current total power: %w", err)
} }
pcd := miner.PreCommitDepositForPower(epochReward.ThisEpochRewardSmoothed, tpow.QualityAdjPowerSmoothed, sectorWeight)
pledge := miner.InitialPledgeForPower( pledge := miner.InitialPledgeForPower(
sectorWeight, sectorWeight,
epochReward.ThisEpochBaselinePower, epochReward.ThisEpochBaselinePower,
@ -271,6 +279,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
circSupply(ctx, vm, minerInfos[i].maddr), circSupply(ctx, vm, minerInfos[i].maddr),
) )
pledge = big.Add(pcd, pledge)
fmt.Println(types.FIL(pledge)) fmt.Println(types.FIL(pledge))
_, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin.MethodsMiner.PreCommitSector, mustEnc(params)) _, err = doExecValue(ctx, vm, minerInfos[i].maddr, m.Worker, pledge, builtin.MethodsMiner.PreCommitSector, mustEnc(params))
if err != nil { if err != nil {
@ -318,9 +328,15 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// TODO: copied from actors test harness, deduplicate or remove from here // TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{} type fakeRand struct{}
func (fr *fakeRand) GetRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32) out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
}
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil return out, nil
} }
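Both fake randomness methods above derive their 32 output bytes deterministically from the epoch via a seeded math/rand source; the *1000 scaling on the chain variant only keeps the two streams from colliding. A minimal standalone sketch (not part of the diff; the epoch value is made up):

package main

import (
	"fmt"
	"math/rand"
)

// fakeBytes mirrors the fakes above: a deterministic 32-byte value seeded from the epoch.
func fakeBytes(seed int64) []byte {
	out := make([]byte, 32)
	_, _ = rand.New(rand.NewSource(seed)).Read(out)
	return out
}

func main() {
	epoch := int64(7)                                 // hypothetical epoch
	fmt.Printf("chain:  %x\n", fakeBytes(epoch*1000)) // same seeding as GetChainRandomness
	fmt.Printf("beacon: %x\n", fakeBytes(epoch))      // same seeding as GetBeaconRandomness
}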

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
@ -19,9 +20,9 @@ import (
bstore "github.com/filecoin-project/lotus/lib/blockstore" bstore "github.com/filecoin-project/lotus/lib/blockstore"
) )
func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (*types.Actor, map[address.Address]address.Address, error) { func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor, rootVerifier genesis.Actor) (int64, *types.Actor, map[address.Address]address.Address, error) {
if len(initialActors) > MaxAccounts { if len(initialActors) > MaxAccounts {
return nil, nil, xerrors.New("too many initial actors") return 0, nil, nil, xerrors.New("too many initial actors")
} }
var ias init_.State var ias init_.State
@ -32,55 +33,105 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
amap := adt.MakeEmptyMap(store) amap := adt.MakeEmptyMap(store)
keyToId := map[address.Address]address.Address{} keyToId := map[address.Address]address.Address{}
counter := int64(AccountStart)
for i, a := range initialActors { for _, a := range initialActors {
if a.Type == genesis.TMultisig { if a.Type == genesis.TMultisig {
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(a.Meta, &ainfo); err != nil {
return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
for _, e := range ainfo.Signers {
if _, ok := keyToId[e]; ok {
continue
}
fmt.Printf("init set %s t0%d\n", e, counter)
value := cbg.CborInt(counter)
if err := amap.Put(adt.AddrKey(e), &value); err != nil {
return 0, nil, nil, err
}
counter = counter + 1
var err error
keyToId[e], err = address.NewIDAddress(uint64(value))
if err != nil {
return 0, nil, nil, err
}
}
// Need to add actors for all multisigs too
continue continue
} }
if a.Type != genesis.TAccount { if a.Type != genesis.TAccount {
return nil, nil, xerrors.Errorf("unsupported account type: %s", a.Type) // TODO: Support msig (skip here) return 0, nil, nil, xerrors.Errorf("unsupported account type: %s", a.Type)
} }
var ainfo genesis.AccountMeta var ainfo genesis.AccountMeta
if err := json.Unmarshal(a.Meta, &ainfo); err != nil { if err := json.Unmarshal(a.Meta, &ainfo); err != nil {
return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
} }
fmt.Printf("init set %s t0%d\n", ainfo.Owner, AccountStart+int64(i)) fmt.Printf("init set %s t0%d\n", ainfo.Owner, counter)
value := cbg.CborInt(AccountStart + int64(i)) value := cbg.CborInt(counter)
if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil { if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
return nil, nil, err return 0, nil, nil, err
} }
counter = counter + 1
var err error var err error
keyToId[ainfo.Owner], err = address.NewIDAddress(uint64(value)) keyToId[ainfo.Owner], err = address.NewIDAddress(uint64(value))
if err != nil { if err != nil {
return nil, nil, err return 0, nil, nil, err
} }
} }
if rootVerifier.Type == genesis.TAccount { if rootVerifier.Type == genesis.TAccount {
var ainfo genesis.AccountMeta var ainfo genesis.AccountMeta
if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil { if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
} }
value := cbg.CborInt(80) value := cbg.CborInt(80)
if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil { if err := amap.Put(adt.AddrKey(ainfo.Owner), &value); err != nil {
return nil, nil, err return 0, nil, nil, err
}
} else if rootVerifier.Type == genesis.TMultisig {
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(rootVerifier.Meta, &ainfo); err != nil {
return 0, nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err)
}
for _, e := range ainfo.Signers {
if _, ok := keyToId[e]; ok {
continue
}
fmt.Printf("init set %s t0%d\n", e, counter)
value := cbg.CborInt(counter)
if err := amap.Put(adt.AddrKey(e), &value); err != nil {
return 0, nil, nil, err
}
counter = counter + 1
var err error
keyToId[e], err = address.NewIDAddress(uint64(value))
if err != nil {
return 0, nil, nil, err
}
} }
} }
amapaddr, err := amap.Root() amapaddr, err := amap.Root()
if err != nil { if err != nil {
return nil, nil, err return 0, nil, nil, err
} }
ias.AddressMap = amapaddr ias.AddressMap = amapaddr
statecid, err := store.Put(store.Context(), &ias) statecid, err := store.Put(store.Context(), &ias)
if err != nil { if err != nil {
return nil, nil, err return 0, nil, nil, err
} }
act := &types.Actor{ act := &types.Actor{
@ -88,5 +139,5 @@ func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesi
Head: statecid, Head: statecid,
} }
return act, keyToId, nil return counter, act, keyToId, nil
} }

View File

@ -21,14 +21,10 @@ func mustEnc(i cbg.CBORMarshaler) []byte {
return enc return enc
} }
func doExec(ctx context.Context, vm *vm.VM, to, from address.Address, method abi.MethodNum, params []byte) ([]byte, error) {
return doExecValue(ctx, vm, to, from, types.NewInt(0), method, params)
}
func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
act, err := vm.StateTree().GetActor(from) act, err := vm.StateTree().GetActor(from)
if err != nil { if err != nil {
return nil, xerrors.Errorf("doExec failed to get from actor: %w", err) return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err)
} }
ret, err := vm.ApplyImplicitMessage(ctx, &types.Message{ ret, err := vm.ApplyImplicitMessage(ctx, &types.Message{

View File

@ -14,7 +14,6 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/events/state"
@ -52,7 +51,7 @@ func StartFundManager(lc fx.Lifecycle, api API) *FundMgr {
match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
return dealDiffFn(ctx, oldTs.Key(), newTs.Key()) return dealDiffFn(ctx, oldTs.Key(), newTs.Key())
} }
return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, int(build.MessageConfidence), events.NoTimeout, match) return ev.StateChanged(fm.checkFunc, fm.stateChanged, fm.revert, 0, events.NoTimeout, match)
}, },
}) })
return fm return fm
@ -60,7 +59,7 @@ func StartFundManager(lc fx.Lifecycle, api API) *FundMgr {
type fundMgrAPI interface { type fundMgrAPI interface {
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error)
MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
} }
@ -92,6 +91,10 @@ func (fm *FundMgr) stateChanged(ts *types.TipSet, ts2 *types.TipSet, states even
// overwrite our in memory cache with new values from chain (chain is canonical) // overwrite our in memory cache with new values from chain (chain is canonical)
fm.lk.Lock() fm.lk.Lock()
for addr, balanceChange := range changedBalances { for addr, balanceChange := range changedBalances {
if fm.available[addr].Int != nil {
log.Infof("State balance change recorded, prev: %s, new: %s", fm.available[addr].String(), balanceChange.To.String())
}
fm.available[addr] = balanceChange.To fm.available[addr] = balanceChange.To
} }
fm.lk.Unlock() fm.lk.Unlock()
@ -116,15 +119,17 @@ func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Add
return cid.Undef, err return cid.Undef, err
} }
fm.lk.Lock() fm.lk.Lock()
bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK)
if err != nil {
fm.lk.Unlock()
return cid.Undef, err
}
stateAvail := types.BigSub(bal.Escrow, bal.Locked)
avail, ok := fm.available[idAddr] avail, ok := fm.available[idAddr]
if !ok { if !ok {
bal, err := fm.api.StateMarketBalance(ctx, addr, types.EmptyTSK) avail = stateAvail
if err != nil {
fm.lk.Unlock()
return cid.Undef, err
}
avail = types.BigSub(bal.Escrow, bal.Locked)
} }
toAdd := types.BigSub(amt, avail) toAdd := types.BigSub(amt, avail)
@ -134,6 +139,8 @@ func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Add
fm.available[idAddr] = big.Add(avail, toAdd) fm.available[idAddr] = big.Add(avail, toAdd)
fm.lk.Unlock() fm.lk.Unlock()
log.Infof("Funds operation w/ Expected Balance: %s, In State: %s, Requested: %s, Adding: %s", avail.String(), stateAvail.String(), amt.String(), toAdd.String())
if toAdd.LessThanEqual(big.Zero()) { if toAdd.LessThanEqual(big.Zero()) {
return cid.Undef, nil return cid.Undef, nil
} }
@ -149,7 +156,7 @@ func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Add
Value: toAdd, Value: toAdd,
Method: builtin.MethodsMarket.AddBalance, Method: builtin.MethodsMarket.AddBalance,
Params: params, Params: params,
}) }, nil)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
} }
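The top-up arithmetic in EnsureAvailable reduces to toAdd = requested - available, where available falls back to escrow minus locked funds when there is no cached value. A small standalone sketch of that uncached case (not part of the diff; the numbers are made up):

package main

import "fmt"

// toAdd mirrors the uncached EnsureAvailable arithmetic: available = escrow - locked,
// and funds are only sent when the request exceeds what is already available.
func toAdd(escrow, locked, requested int64) int64 {
	avail := escrow - locked
	if d := requested - avail; d > 0 {
		return d
	}
	return 0
}

func main() {
	fmt.Println(toAdd(100, 30, 100)) // 30: top up the difference
	fmt.Println(toAdd(100, 30, 50))  // 0: nothing to send
}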

View File

@ -36,7 +36,7 @@ func (fapi *fakeAPI) StateMarketBalance(context.Context, address.Address, types.
return fapi.returnedBalance, fapi.returnedBalanceErr return fapi.returnedBalance, fapi.returnedBalanceErr
} }
func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) { func (fapi *fakeAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
fapi.receivedMessage = msg fapi.receivedMessage = msg
return &types.SignedMessage{ return &types.SignedMessage{
Message: *msg, Message: *msg,
@ -147,6 +147,7 @@ func TestAddFunds(t *testing.T) {
} }
for testCase, data := range testCases { for testCase, data := range testCases {
//nolint:scopelint
t.Run(testCase, func(t *testing.T) { t.Run(testCase, func(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel() defer cancel()

View File

@ -0,0 +1,102 @@
package messagepool
import (
"math"
"sync"
)
var noWinnersProbCache []float64
var noWinnersProbOnce sync.Once
func noWinnersProb() []float64 {
noWinnersProbOnce.Do(func() {
poissPdf := func(x float64) float64 {
const Mu = 5
lg, _ := math.Lgamma(x + 1)
result := math.Exp((math.Log(Mu) * x) - lg - Mu)
return result
}
out := make([]float64, 0, MaxBlocks)
for i := 0; i < MaxBlocks; i++ {
out = append(out, poissPdf(float64(i)))
}
noWinnersProbCache = out
})
return noWinnersProbCache
}
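The log-space expression above is the textbook Poisson pmf P(X = k) = e^(-mu) * mu^k / k! with mu = 5, evaluated through Lgamma so the factorial never overflows. A quick standalone check (not part of the diff) that the two forms agree:

package main

import (
	"fmt"
	"math"
)

func main() {
	const mu = 5.0
	factorial := 1.0
	for k := 0.0; k < 6; k++ {
		if k > 0 {
			factorial *= k
		}
		lg, _ := math.Lgamma(k + 1)
		logSpace := math.Exp(math.Log(mu)*k - lg - mu)        // as in noWinnersProb
		direct := math.Exp(-mu) * math.Pow(mu, k) / factorial // textbook pmf
		fmt.Printf("k=%.0f log-space=%.6f direct=%.6f\n", k, logSpace, direct)
	}
}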
var noWinnersProbAssumingCache []float64
var noWinnersProbAssumingOnce sync.Once
func noWinnersProbAssumingMoreThanOne() []float64 {
noWinnersProbAssumingOnce.Do(func() {
cond := math.Log(-1 + math.Exp(5))
poissPdf := func(x float64) float64 {
const Mu = 5
lg, _ := math.Lgamma(x + 1)
result := math.Exp((math.Log(Mu) * x) - lg - cond)
return result
}
out := make([]float64, 0, MaxBlocks)
for i := 0; i < MaxBlocks; i++ {
out = append(out, poissPdf(float64(i+1)))
}
noWinnersProbAssumingCache = out
})
return noWinnersProbAssumingCache
}
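The only difference from noWinnersProb is the normalizer: subtracting cond = log(e^5 - 1) instead of 5 divides the pmf by P(X >= 1) = 1 - e^(-5), i.e. it conditions the Poisson distribution on at least one winner existing, which is why the loop indexes poissPdf(i+1). A standalone sanity check (not part of the diff) that the conditional probabilities sum to about 1:

package main

import (
	"fmt"
	"math"
)

func main() {
	const mu = 5.0
	cond := math.Log(math.Exp(mu) - 1) // log(e^mu - 1), as in the diff
	sum := 0.0
	for k := 1.0; k <= 60; k++ {
		lg, _ := math.Lgamma(k + 1)
		sum += math.Exp(math.Log(mu)*k - lg - cond) // P(X = k | X >= 1)
	}
	fmt.Printf("sum over k >= 1: %.6f\n", sum) // ~1.000000
}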
func binomialCoefficient(n, k float64) float64 {
if k > n {
return math.NaN()
}
r := 1.0
for d := 1.0; d <= k; d++ {
r *= n
r /= d
n--
}
return r
}
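The multiplicative loop computes C(n, k) without building large intermediate factorials; for example C(5, 2) accumulates (5/1) * (4/2) = 10. A standalone check (not part of the diff):

package main

import (
	"fmt"
	"math"
)

// binomialCoefficient reproduces the helper from the diff.
func binomialCoefficient(n, k float64) float64 {
	if k > n {
		return math.NaN()
	}
	r := 1.0
	for d := 1.0; d <= k; d++ {
		r *= n
		r /= d
		n--
	}
	return r
}

func main() {
	fmt.Println(binomialCoefficient(5, 2))  // 10
	fmt.Println(binomialCoefficient(10, 3)) // 120
}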
func (mp *MessagePool) blockProbabilities(tq float64) []float64 {
noWinners := noWinnersProbAssumingMoreThanOne()
p := 1 - tq
binoPdf := func(x, trials float64) float64 {
// based on https://github.com/atgjack/prob
if x > trials {
return 0
}
if p == 0 {
if x == 0 {
return 1.0
}
return 0.0
}
if p == 1 {
if x == trials {
return 1.0
}
return 0.0
}
coef := binomialCoefficient(trials, x)
pow := math.Pow(p, x) * math.Pow(1-p, trials-x)
if math.IsInf(coef, 0) {
return 0
}
return coef * pow
}
out := make([]float64, 0, MaxBlocks)
for place := 0; place < MaxBlocks; place++ {
var pPlace float64
for otherWinners, pCase := range noWinners {
pPlace += pCase * binoPdf(float64(place), float64(otherWinners))
}
out = append(out, pPlace)
}
return out
}

View File

@ -0,0 +1,43 @@
package messagepool
import (
"math"
"math/rand"
"testing"
"time"
)
func TestBlockProbability(t *testing.T) {
mp := &MessagePool{}
bp := mp.blockProbabilities(1 - 0.15)
t.Logf("%+v\n", bp)
for i := 0; i < len(bp)-1; i++ {
if bp[i] < bp[i+1] {
t.Fatalf("expected decreasing block probabilities for this quality: %d %f %f",
i, bp[i], bp[i+1])
}
}
}
func TestWinnerProba(t *testing.T) {
rand.Seed(time.Now().UnixNano())
const N = 1000000
winnerProba := noWinnersProb()
sum := 0
for i := 0; i < N; i++ {
minersRand := rand.Float64()
j := 0
for ; j < MaxBlocks; j++ {
minersRand -= winnerProba[j]
if minersRand < 0 {
break
}
}
sum += j
}
if avg := float64(sum) / N; math.Abs(avg-5) > 0.01 {
t.Fatalf("avg too far off: %f", avg)
}
}

View File

@ -2,6 +2,7 @@ package messagepool
import ( import (
"encoding/json" "encoding/json"
"fmt"
"time" "time"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -35,10 +36,6 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) {
} }
cfg := new(types.MpoolConfig) cfg := new(types.MpoolConfig)
err = json.Unmarshal(cfgBytes, cfg) err = json.Unmarshal(cfgBytes, cfg)
if cfg.GasLimitOverestimation == 0 {
// TODO: remove in next reset
cfg.GasLimitOverestimation = GasLimitOverestimation
}
return cfg, err return cfg, err
} }
@ -56,16 +53,32 @@ func (mp *MessagePool) GetConfig() *types.MpoolConfig {
return mp.cfg.Clone() return mp.cfg.Clone()
} }
func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) { func validateConfg(cfg *types.MpoolConfig) error {
if cfg.ReplaceByFeeRatio < ReplaceByFeeRatioDefault {
return fmt.Errorf("'ReplaceByFeeRatio' is less than required %f < %f",
cfg.ReplaceByFeeRatio, ReplaceByFeeRatioDefault)
}
if cfg.GasLimitOverestimation < 1 {
return fmt.Errorf("'GasLimitOverestimation' cannot be less than 1")
}
return nil
}
func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error {
if err := validateConfg(cfg); err != nil {
return err
}
cfg = cfg.Clone() cfg = cfg.Clone()
mp.cfgLk.Lock() mp.cfgLk.Lock()
mp.cfg = cfg mp.cfg = cfg
mp.rbfNum = types.NewInt(uint64((cfg.ReplaceByFeeRatio - 1) * RbfDenom))
err := saveConfig(cfg, mp.ds) err := saveConfig(cfg, mp.ds)
if err != nil { if err != nil {
log.Warnf("error persisting mpool config: %s", err) log.Warnf("error persisting mpool config: %s", err)
} }
mp.cfgLk.Unlock() mp.cfgLk.Unlock()
return nil
} }
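Since SetConfig now returns an error, callers must handle rejection of invalid configs rather than relying on silent clamping. A hedged usage sketch (not part of the diff; tightenOverestimation is a hypothetical helper, and it assumes GasLimitOverestimation is the float64 field validated above):

package messagepool

// tightenOverestimation is a hypothetical caller: it tweaks one knob and lets
// SetConfig reject the change if it violates the new validation rules
// (ReplaceByFeeRatio below the default, or overestimation below 1).
func tightenOverestimation(mp *MessagePool, ratio float64) error {
	cfg := mp.GetConfig()              // GetConfig returns a clone, safe to mutate
	cfg.GasLimitOverestimation = ratio // must stay >= 1
	return mp.SetConfig(cfg)           // persisted only if it validates
}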
func DefaultConfig() *types.MpoolConfig { func DefaultConfig() *types.MpoolConfig {

View File

@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
@ -20,14 +21,13 @@ import (
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
lps "github.com/whyrusleeping/pubsub" lps "github.com/whyrusleeping/pubsub"
"go.uber.org/multierr"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal"
@ -39,12 +39,15 @@ import (
var log = logging.Logger("messagepool") var log = logging.Logger("messagepool")
const futureDebug = false var futureDebug = false
const repubMsgLimit = 5 var rbfNumBig = types.NewInt(uint64((ReplaceByFeeRatioDefault - 1) * RbfDenom))
var rbfDenomBig = types.NewInt(RbfDenom)
const RbfDenom = 256 const RbfDenom = 256
var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second
var ( var (
ErrMessageTooBig = errors.New("message too big") ErrMessageTooBig = errors.New("message too big")
@ -58,6 +61,8 @@ var (
ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail") ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail")
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium") ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
) )
const ( const (
@ -91,8 +96,14 @@ type MessagePool struct {
ds dtypes.MetadataDS ds dtypes.MetadataDS
closer chan struct{} addSema chan struct{}
repubTk *clock.Ticker
closer chan struct{}
repubTk *clock.Ticker
repubTrigger chan struct{}
republished map[cid.Cid]struct{}
localAddrs map[address.Address]struct{} localAddrs map[address.Address]struct{}
@ -104,8 +115,6 @@ type MessagePool struct {
cfgLk sync.Mutex cfgLk sync.Mutex
cfg *types.MpoolConfig cfg *types.MpoolConfig
rbfNum, rbfDenom types.BigInt
api Provider api Provider
minGasPrice types.BigInt minGasPrice types.BigInt
@ -152,7 +161,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
if m.Cid() != exms.Cid() { if m.Cid() != exms.Cid() {
// check if RBF passes // check if RBF passes
minPrice := exms.Message.GasPremium minPrice := exms.Message.GasPremium
minPrice = types.BigAdd(minPrice, types.BigDiv(types.BigMul(minPrice, mp.rbfNum), mp.rbfDenom)) minPrice = types.BigAdd(minPrice, types.BigDiv(types.BigMul(minPrice, rbfNumBig), rbfDenomBig))
minPrice = types.BigAdd(minPrice, types.NewInt(1)) minPrice = types.BigAdd(minPrice, types.NewInt(1))
if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 { if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium, log.Infow("add with RBF", "oldpremium", exms.Message.GasPremium,
@ -171,69 +180,6 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
return !has, nil return !has, nil
} }
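The RBF threshold above is old premium + old*rbfNumBig/rbfDenomBig + 1. Assuming ReplaceByFeeRatioDefault is 1.25 (its value is not shown in this diff) with RbfDenom = 256, rbfNumBig works out to 64, so a replacement has to bid at least 25% over the original premium, plus one unit. A worked standalone example (not part of the diff):

package main

import "fmt"

func main() {
	const rbfDenom = 256
	const replaceByFeeRatio = 1.25 // assumed default, not shown in this diff
	rbfNum := uint64((replaceByFeeRatio - 1) * rbfDenom)

	oldPremium := uint64(1000)
	minPrice := oldPremium + oldPremium*rbfNum/rbfDenom + 1
	fmt.Println(rbfNum, minPrice) // 64 1251
}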
type Provider interface {
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
PutMessage(m types.ChainMsg) (cid.Cid, error)
PubSubPublish(string, []byte) error
StateGetActor(address.Address, *types.TipSet) (*types.Actor, error)
StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error)
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error)
}
type mpoolProvider struct {
sm *stmgr.StateManager
ps *pubsub.PubSub
}
func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
return &mpoolProvider{sm: sm, ps: ps}
}
func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
mpp.sm.ChainStore().SubscribeHeadChanges(cb)
return mpp.sm.ChainStore().GetHeaviestTipSet()
}
func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return mpp.sm.ChainStore().PutMessage(m)
}
func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
return mpp.ps.Publish(k, v)
}
func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
var act types.Actor
return &act, mpp.sm.WithParentState(ts, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
}
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
return mpp.sm.ResolveToKeyAddress(ctx, addr, ts)
}
func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
return mpp.sm.ChainStore().MessagesForBlock(h)
}
func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
return mpp.sm.ChainStore().MessagesForTipset(ts)
}
func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
return mpp.sm.ChainStore().LoadTipSet(tsk)
}
func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
baseFee, err := mpp.sm.ChainStore().ComputeBaseFee(ctx, ts)
if err != nil {
return types.NewInt(0), xerrors.Errorf("computing base fee at %s: %w", ts, err)
}
return baseFee, nil
}
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) { func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize) cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
@ -247,8 +193,10 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
mp := &MessagePool{ mp := &MessagePool{
ds: ds, ds: ds,
addSema: make(chan struct{}, 1),
closer: make(chan struct{}), closer: make(chan struct{}),
repubTk: build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second), repubTk: build.Clock.Ticker(RepublishInterval),
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}), localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet), pending: make(map[address.Address]*msgSet),
minGasPrice: types.NewInt(0), minGasPrice: types.NewInt(0),
@ -261,8 +209,6 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
api: api, api: api,
netName: netName, netName: netName,
cfg: cfg, cfg: cfg,
rbfNum: types.NewInt(uint64((cfg.ReplaceByFeeRatio - 1) * RbfDenom)),
rbfDenom: types.NewInt(RbfDenom),
evtTypes: [...]journal.EventType{ evtTypes: [...]journal.EventType{
evtTypeMpoolAdd: journal.J.RegisterEventType("mpool", "add"), evtTypeMpoolAdd: journal.J.RegisterEventType("mpool", "add"),
evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"), evtTypeMpoolRemove: journal.J.RegisterEventType("mpool", "remove"),
@ -273,12 +219,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
// enable initial prunes // enable initial prunes
mp.pruneCooldown <- struct{}{} mp.pruneCooldown <- struct{}{}
if err := mp.loadLocal(); err != nil { // load the current tipset and subscribe to head changes _before_ loading local messages
log.Errorf("loading local messages: %+v", err)
}
go mp.runLoop()
mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
err := mp.HeadChange(rev, app) err := mp.HeadChange(rev, app)
if err != nil { if err != nil {
@ -287,6 +228,12 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
return err return err
}) })
if err := mp.loadLocal(); err != nil {
log.Errorf("loading local messages: %+v", err)
}
go mp.runLoop()
return mp, nil return mp, nil
} }
@ -311,77 +258,12 @@ func (mp *MessagePool) runLoop() {
for { for {
select { select {
case <-mp.repubTk.C: case <-mp.repubTk.C:
mp.lk.Lock() if err := mp.republishPendingMessages(); err != nil {
log.Errorf("error while republishing messages: %s", err)
msgsForAddr := make(map[address.Address][]*types.SignedMessage)
for a := range mp.localAddrs {
msgsForAddr[a] = mp.pendingFor(a)
} }
case <-mp.repubTrigger:
mp.lk.Unlock() if err := mp.republishPendingMessages(); err != nil {
log.Errorf("error while republishing messages: %s", err)
var errout error
outputMsgs := []*types.SignedMessage{}
for a, msgs := range msgsForAddr {
a, err := mp.api.StateGetActor(a, nil)
if err != nil {
errout = multierr.Append(errout, xerrors.Errorf("could not get actor state: %w", err))
continue
}
curNonce := a.Nonce
for _, m := range msgs {
if m.Message.Nonce < curNonce {
continue
}
if m.Message.Nonce != curNonce {
break
}
outputMsgs = append(outputMsgs, m)
curNonce++
}
}
if len(outputMsgs) != 0 {
log.Infow("republishing local messages", "n", len(outputMsgs))
}
if len(outputMsgs) > repubMsgLimit {
outputMsgs = outputMsgs[:repubMsgLimit]
}
for _, msg := range outputMsgs {
msgb, err := msg.Serialize()
if err != nil {
errout = multierr.Append(errout, xerrors.Errorf("could not serialize: %w", err))
continue
}
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
if err != nil {
errout = multierr.Append(errout, xerrors.Errorf("could not publish: %w", err))
continue
}
}
if errout != nil {
log.Errorf("errors while republishing: %+v", errout)
}
if len(outputMsgs) > 0 {
journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
msgs := make([]MessagePoolEvt_Message, 0, len(outputMsgs))
for _, m := range outputMsgs {
msgs = append(msgs, MessagePoolEvt_Message{Message: m.Message, CID: m.Cid()})
}
return MessagePoolEvt{
Action: "repub",
Messages: msgs,
Error: errout,
}
})
} }
case <-mp.pruneTrigger: case <-mp.pruneTrigger:
@ -394,7 +276,6 @@ func (mp *MessagePool) runLoop() {
return return
} }
} }
} }
func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error { func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error {
@ -417,8 +298,20 @@ func (mp *MessagePool) verifyMsgBeforePush(m *types.SignedMessage, epoch abi.Cha
} }
func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m)
if err != nil {
return cid.Undef, err
}
// serialize push access to reduce lock contention
mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
mp.curTsLk.Lock() mp.curTsLk.Lock()
epoch := mp.curTs.Height() curTs := mp.curTs
epoch := curTs.Height()
mp.curTsLk.Unlock() mp.curTsLk.Unlock()
if err := mp.verifyMsgBeforePush(m, epoch); err != nil { if err := mp.verifyMsgBeforePush(m, epoch); err != nil {
return cid.Undef, err return cid.Undef, err
@ -429,9 +322,17 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
return cid.Undef, err return cid.Undef, err
} }
if err := mp.Add(m); err != nil { mp.curTsLk.Lock()
if mp.curTs != curTs {
mp.curTsLk.Unlock()
return cid.Undef, ErrTryAgain
}
if err := mp.addTs(m, mp.curTs); err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err return cid.Undef, err
} }
mp.curTsLk.Unlock()
mp.lk.Lock() mp.lk.Lock()
if err := mp.addLocal(m, msgb); err != nil { if err := mp.addLocal(m, msgb); err != nil {
@ -443,12 +344,17 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
return m.Cid(), mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) return m.Cid(), mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
} }
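Push (and PushWithNonce below) can now fail with ErrTryAgain when the head moves between verification and insertion, so callers are expected to retry rather than treat it as fatal. A hedged caller-side sketch (not part of the diff; pushWithRetry is a hypothetical helper):

package messagepool

import (
	"errors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/types"
)

// pushWithRetry retries Push a bounded number of times while the pool reports
// the benign ErrTryAgain race, and fails fast on any other error.
func pushWithRetry(mp *MessagePool, m *types.SignedMessage, attempts int) (cid.Cid, error) {
	var err error
	for i := 0; i < attempts; i++ {
		var c cid.Cid
		if c, err = mp.Push(m); err == nil {
			return c, nil
		}
		if !errors.Is(err, ErrTryAgain) {
			return cid.Undef, err
		}
	}
	return cid.Undef, err
}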
func (mp *MessagePool) Add(m *types.SignedMessage) error { func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
// big messages are bad, anti DOS // big messages are bad, anti DOS
if m.Size() > 32*1024 { if m.Size() > 32*1024 {
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig) return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
} }
// Perform syntactic validation, minGas=0 as we check it correctly in SelectMessages
if err := m.Message.ValidForBlockInclusion(0); err != nil {
return xerrors.Errorf("message not valid for block inclusion: %w", err)
}
if m.Message.To == address.Undef { if m.Message.To == address.Undef {
return ErrInvalidToAddr return ErrInvalidToAddr
} }
@ -462,6 +368,21 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
return err return err
} }
return nil
}
func (mp *MessagePool) Add(m *types.SignedMessage) error {
err := mp.checkMessage(m)
if err != nil {
return err
}
// serialize push access to reduce lock contention
mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
return mp.addTs(m, mp.curTs) return mp.addTs(m, mp.curTs)
@ -624,42 +545,16 @@ func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet)
} }
func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) { func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) {
// TODO: this method probably should be cached act, err := mp.api.GetActorAfter(addr, curTs)
act, err := mp.api.StateGetActor(addr, curTs)
if err != nil { if err != nil {
return 0, err return 0, err
} }
baseNonce := act.Nonce return act.Nonce, nil
// TODO: the correct thing to do here is probably to set curTs to chain.head
// but since we have an accurate view of the world until a head change occurs,
// this should be fine
if curTs == nil {
return baseNonce, nil
}
msgs, err := mp.api.MessagesForTipset(curTs)
if err != nil {
return 0, xerrors.Errorf("failed to check messages for tipset: %w", err)
}
for _, m := range msgs {
msg := m.VMMessage()
if msg.From == addr {
if msg.Nonce != baseNonce {
return 0, xerrors.Errorf("tipset %s has bad nonce ordering (%d != %d)", curTs.Cids(), msg.Nonce, baseNonce)
}
baseNonce++
}
}
return baseNonce, nil
} }
func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
act, err := mp.api.StateGetActor(addr, ts) act, err := mp.api.GetActorAfter(addr, ts)
if err != nil { if err != nil {
return types.EmptyInt, err return types.EmptyInt, err
} }
@ -668,31 +563,64 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
} }
func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) { func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) {
mp.curTsLk.Lock() // serialize push access to reduce lock contention
defer mp.curTsLk.Unlock() mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
mp.curTsLk.Lock()
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock()
curTs := mp.curTs
fromKey := addr fromKey := addr
if fromKey.Protocol() == address.ID { if fromKey.Protocol() == address.ID {
var err error var err error
fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs) fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs)
if err != nil { if err != nil {
mp.lk.Unlock()
mp.curTsLk.Unlock()
return nil, xerrors.Errorf("resolving sender key: %w", err) return nil, xerrors.Errorf("resolving sender key: %w", err)
} }
} }
nonce, err := mp.getNonceLocked(fromKey, mp.curTs) nonce, err := mp.getNonceLocked(fromKey, mp.curTs)
if err != nil { if err != nil {
mp.lk.Unlock()
mp.curTsLk.Unlock()
return nil, xerrors.Errorf("get nonce locked failed: %w", err) return nil, xerrors.Errorf("get nonce locked failed: %w", err)
} }
// release the locks for signing
mp.lk.Unlock()
mp.curTsLk.Unlock()
msg, err := cb(fromKey, nonce) msg, err := cb(fromKey, nonce)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// reacquire the locks and check state for consistency
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
if mp.curTs != curTs {
return nil, ErrTryAgain
}
mp.lk.Lock()
defer mp.lk.Unlock()
nonce2, err := mp.getNonceLocked(fromKey, mp.curTs)
if err != nil {
return nil, xerrors.Errorf("get nonce locked failed: %w", err)
}
if nonce2 != nonce {
return nil, ErrTryAgain
}
if err := mp.verifyMsgBeforePush(msg, mp.curTs.Height()); err != nil { if err := mp.verifyMsgBeforePush(msg, mp.curTs.Height()); err != nil {
return nil, err return nil, err
} }
@ -768,6 +696,10 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) {
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.allPending()
}
func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) {
out := make([]*types.SignedMessage, 0) out := make([]*types.SignedMessage, 0)
for a := range mp.pending { for a := range mp.pending {
out = append(out, mp.pendingFor(a)...) out = append(out, mp.pendingFor(a)...)
@ -775,6 +707,7 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) {
return out, mp.curTs return out, mp.curTs
} }
func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) { func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
@ -807,6 +740,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
repubTrigger := false
rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage) rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage)
add := func(m *types.SignedMessage) { add := func(m *types.SignedMessage) {
s, ok := rmsgs[m.Message.From] s, ok := rmsgs[m.Message.From]
@ -831,40 +765,70 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
mp.Remove(from, nonce) mp.Remove(from, nonce)
} }
maybeRepub := func(cid cid.Cid) {
if !repubTrigger {
mp.lk.Lock()
_, republished := mp.republished[cid]
mp.lk.Unlock()
if republished {
repubTrigger = true
}
}
}
var merr error
for _, ts := range revert { for _, ts := range revert {
pts, err := mp.api.LoadTipSet(ts.Parents()) pts, err := mp.api.LoadTipSet(ts.Parents())
if err != nil { if err != nil {
return err log.Errorf("error loading reverted tipset parent: %s", err)
} merr = multierror.Append(merr, err)
continue
msgs, err := mp.MessagesForBlocks(ts.Blocks())
if err != nil {
return err
} }
mp.curTs = pts mp.curTs = pts
msgs, err := mp.MessagesForBlocks(ts.Blocks())
if err != nil {
log.Errorf("error retrieving messages for reverted block: %s", err)
merr = multierror.Append(merr, err)
continue
}
for _, msg := range msgs { for _, msg := range msgs {
add(msg) add(msg)
} }
} }
for _, ts := range apply { for _, ts := range apply {
mp.curTs = ts
for _, b := range ts.Blocks() { for _, b := range ts.Blocks() {
bmsgs, smsgs, err := mp.api.MessagesForBlock(b) bmsgs, smsgs, err := mp.api.MessagesForBlock(b)
if err != nil { if err != nil {
return xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
log.Errorf("error retrieving messages for block: %s", xerr)
merr = multierror.Append(merr, xerr)
continue
} }
for _, msg := range smsgs { for _, msg := range smsgs {
rm(msg.Message.From, msg.Message.Nonce) rm(msg.Message.From, msg.Message.Nonce)
maybeRepub(msg.Cid())
} }
for _, msg := range bmsgs { for _, msg := range bmsgs {
rm(msg.From, msg.Nonce) rm(msg.From, msg.Nonce)
maybeRepub(msg.Cid())
} }
} }
}
mp.curTs = ts if repubTrigger {
select {
case mp.repubTrigger <- struct{}{}:
default:
}
} }
for _, s := range rmsgs { for _, s := range rmsgs {
@ -876,7 +840,9 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
} }
if len(revert) > 0 && futureDebug { if len(revert) > 0 && futureDebug {
msgs, ts := mp.Pending() mp.lk.Lock()
msgs, ts := mp.allPending()
mp.lk.Unlock()
buckets := map[address.Address]*statBucket{} buckets := map[address.Address]*statBucket{}
@ -893,7 +859,8 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
} }
for a, bkt := range buckets { for a, bkt := range buckets {
act, err := mp.api.StateGetActor(a, ts) // TODO that might not be correct with GetActorAfter but it is only debug code
act, err := mp.api.GetActorAfter(a, ts)
if err != nil { if err != nil {
log.Debugf("%s, err: %s\n", a, err) log.Debugf("%s, err: %s\n", a, err)
continue continue
@ -940,7 +907,72 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
} }
} }
return nil return merr
}
func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error {
add := func(m *types.SignedMessage) {
s, ok := rmsgs[m.Message.From]
if !ok {
s = make(map[uint64]*types.SignedMessage)
rmsgs[m.Message.From] = s
}
s[m.Message.Nonce] = m
}
rm := func(from address.Address, nonce uint64) {
s, ok := rmsgs[from]
if !ok {
return
}
if _, ok := s[nonce]; ok {
delete(s, nonce)
return
}
}
revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to)
if err != nil {
return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err)
}
var merr error
for _, ts := range revert {
msgs, err := mp.MessagesForBlocks(ts.Blocks())
if err != nil {
log.Errorf("error retrieving messages for reverted block: %s", err)
merr = multierror.Append(merr, err)
continue
}
for _, msg := range msgs {
add(msg)
}
}
for _, ts := range apply {
for _, b := range ts.Blocks() {
bmsgs, smsgs, err := mp.api.MessagesForBlock(b)
if err != nil {
xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
log.Errorf("error retrieving messages for block: %s", xerr)
merr = multierror.Append(merr, xerr)
continue
}
for _, msg := range smsgs {
rm(msg.Message.From, msg.Message.Nonce)
}
for _, msg := range bmsgs {
rm(msg.From, msg.Nonce)
}
}
}
return merr
} }
type statBucket struct { type statBucket struct {
@ -993,6 +1025,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
go func() { go func() {
defer mp.changes.Unsub(sub, localUpdates) defer mp.changes.Unsub(sub, localUpdates)
defer close(out)
for { for {
select { select {
@ -1001,9 +1034,13 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
case out <- u.(api.MpoolUpdate): case out <- u.(api.MpoolUpdate):
case <-ctx.Done(): case <-ctx.Done():
return return
case <-mp.closer:
return
} }
case <-ctx.Done(): case <-ctx.Done():
return return
case <-mp.closer:
return
} }
} }
}() }()
@ -1040,3 +1077,40 @@ func (mp *MessagePool) loadLocal() error {
return nil return nil
} }
func (mp *MessagePool) Clear(local bool) {
mp.lk.Lock()
defer mp.lk.Unlock()
// remove everything if local is true, including removing local messages from
// the datastore
if local {
for a := range mp.localAddrs {
mset, ok := mp.pending[a]
if !ok {
continue
}
for _, m := range mset.msgs {
err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes())))
if err != nil {
log.Warnf("error deleting local message: %s", err)
}
}
}
mp.pending = make(map[address.Address]*msgSet)
mp.republished = nil
return
}
// remove everything except the local messages
for a := range mp.pending {
_, isLocal := mp.localAddrs[a]
if isLocal {
continue
}
delete(mp.pending, a)
}
}

View File

@ -3,18 +3,24 @@ package messagepool
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"testing" "testing"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp" _ "github.com/filecoin-project/lotus/lib/sigs/secp"
) )
@ -31,14 +37,25 @@ type testMpoolAPI struct {
balance map[address.Address]types.BigInt balance map[address.Address]types.BigInt
tipsets []*types.TipSet tipsets []*types.TipSet
published int
} }
func newTestMpoolAPI() *testMpoolAPI { func newTestMpoolAPI() *testMpoolAPI {
return &testMpoolAPI{ tma := &testMpoolAPI{
bmsgs: make(map[cid.Cid][]*types.SignedMessage), bmsgs: make(map[cid.Cid][]*types.SignedMessage),
statenonce: make(map[address.Address]uint64), statenonce: make(map[address.Address]uint64),
balance: make(map[address.Address]types.BigInt), balance: make(map[address.Address]types.BigInt),
} }
genesis := mock.MkBlock(nil, 1, 1)
tma.tipsets = append(tma.tipsets, mock.TipSet(genesis))
return tma
}
func (tma *testMpoolAPI) nextBlock() *types.BlockHeader {
newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1)
tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk))
return newBlk
} }
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) { func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
@ -69,12 +86,11 @@ func (tma *testMpoolAPI) setBalanceRaw(addr address.Address, v types.BigInt) {
func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) {
tma.bmsgs[h.Cid()] = msgs tma.bmsgs[h.Cid()] = msgs
tma.tipsets = append(tma.tipsets, mock.TipSet(h))
} }
func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
tma.cb = cb tma.cb = cb
return nil return tma.tipsets[0]
} }
func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
@ -82,18 +98,47 @@ func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
} }
func (tma *testMpoolAPI) PubSubPublish(string, []byte) error { func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
tma.published++
return nil return nil
} }
func (tma *testMpoolAPI) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
// regression check for load bug
if ts == nil {
panic("GetActorAfter called with nil tipset")
}
balance, ok := tma.balance[addr] balance, ok := tma.balance[addr]
if !ok { if !ok {
balance = types.NewInt(1000e6) balance = types.NewInt(1000e6)
tma.balance[addr] = balance tma.balance[addr] = balance
} }
msgs := make([]*types.SignedMessage, 0)
for _, b := range ts.Blocks() {
for _, m := range tma.bmsgs[b.Cid()] {
if m.Message.From == addr {
msgs = append(msgs, m)
}
}
}
sort.Slice(msgs, func(i, j int) bool {
return msgs[i].Message.Nonce < msgs[j].Message.Nonce
})
nonce := tma.statenonce[addr]
for _, m := range msgs {
if m.Message.Nonce != nonce {
break
}
nonce++
}
return &types.Actor{ return &types.Actor{
Code: builtin.StorageMarketActorCodeID, Code: builtin.StorageMarketActorCodeID,
Nonce: tma.statenonce[addr], Nonce: nonce,
Balance: balance, Balance: balance,
}, nil }, nil
} }
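The nonce returned above only advances while the tipset's messages from the address form a contiguous run starting at the stored state nonce; any gap stops the scan. A standalone illustration (not part of the diff; nonceAfter is a hypothetical distillation of that loop):

package main

import "fmt"

// nonceAfter distills the loop above: walk sorted nonces from the tipset and
// count only the contiguous prefix starting at stateNonce.
func nonceAfter(stateNonce uint64, sortedNonces []uint64) uint64 {
	nonce := stateNonce
	for _, n := range sortedNonces {
		if n != nonce {
			break
		}
		nonce++
	}
	return nonce
}

func main() {
	fmt.Println(nonceAfter(0, []uint64{0, 1, 3})) // 2: the gap at 2 stops the scan
	fmt.Println(nonceAfter(2, []uint64{0, 1}))    // 2: nonces below the state nonce don't advance it
}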
@ -179,7 +224,7 @@ func TestMessagePool(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a := mock.MkBlock(nil, 1, 1) a := tma.nextBlock()
sender, err := w.GenerateKey(crypto.SigTypeBLS) sender, err := w.GenerateKey(crypto.SigTypeBLS)
if err != nil { if err != nil {
@ -205,7 +250,7 @@ func TestMessagePool(t *testing.T) {
assertNonce(t, mp, sender, 2) assertNonce(t, mp, sender, 2)
} }
func TestRevertMessages(t *testing.T) { func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma := newTestMpoolAPI() tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore()) w, err := wallet.NewWallet(wallet.NewMemKeyStore())
@ -220,8 +265,57 @@ func TestRevertMessages(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a := mock.MkBlock(nil, 1, 1) a := tma.nextBlock()
b := mock.MkBlock(mock.TipSet(a), 1, 1)
sender, err := w.GenerateKey(crypto.SigTypeBLS)
if err != nil {
t.Fatal(err)
}
target := mock.Address(1001)
var msgs []*types.SignedMessage
for i := 0; i < 5; i++ {
m := mock.MkMessage(sender, target, uint64(i), w)
msgs = append(msgs, m)
mustAdd(t, mp, m)
}
tma.setStateNonce(sender, 0)
tma.setBlockMessages(a, msgs[0], msgs[1])
tma.applyBlock(t, a)
tsa := mock.TipSet(a)
_, _ = mp.Pending()
selm, _ := mp.SelectMessages(tsa, 1)
if len(selm) == 0 {
t.Fatal("should have returned the rest of the messages")
}
}
func TestRevertMessages(t *testing.T) {
futureDebug = true
defer func() {
futureDebug = false
}()
tma := newTestMpoolAPI()
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
a := tma.nextBlock()
b := tma.nextBlock()
sender, err := w.GenerateKey(crypto.SigTypeBLS) sender, err := w.GenerateKey(crypto.SigTypeBLS)
if err != nil { if err != nil {
@ -255,6 +349,7 @@ func TestRevertMessages(t *testing.T) {
assertNonce(t, mp, sender, 4) assertNonce(t, mp, sender, 4)
p, _ := mp.Pending() p, _ := mp.Pending()
fmt.Printf("%+v\n", p)
if len(p) != 3 { if len(p) != 3 {
t.Fatal("expected three messages in mempool") t.Fatal("expected three messages in mempool")
} }
@ -276,7 +371,7 @@ func TestPruningSimple(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a := mock.MkBlock(nil, 1, 1) a := tma.nextBlock()
tma.applyBlock(t, a) tma.applyBlock(t, a)
sender, err := w.GenerateKey(crypto.SigTypeBLS) sender, err := w.GenerateKey(crypto.SigTypeBLS)
@ -309,3 +404,246 @@ func TestPruningSimple(t *testing.T) {
t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
} }
} }
func TestLoadLocal(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
msgs := make(map[cid.Cid]struct{})
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
cid, err := mp.Push(m)
if err != nil {
t.Fatal(err)
}
msgs[cid] = struct{}{}
}
err = mp.Close()
if err != nil {
t.Fatal(err)
}
mp, err = New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
pmsgs, _ := mp.Pending()
if len(msgs) != len(pmsgs) {
t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
}
for _, m := range pmsgs {
cid := m.Cid()
_, ok := msgs[cid]
if !ok {
t.Fatal("unknown message")
}
delete(msgs, cid)
}
if len(msgs) > 0 {
t.Fatalf("not all messages were laoded; missing %d messages", len(msgs))
}
}
func TestClearAll(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m)
if err != nil {
t.Fatal(err)
}
}
for i := 0; i < 10; i++ {
m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
mustAdd(t, mp, m)
}
mp.Clear(true)
pending, _ := mp.Pending()
if len(pending) > 0 {
t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
}
}
func TestClearNonLocal(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m)
if err != nil {
t.Fatal(err)
}
}
for i := 0; i < 10; i++ {
m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
mustAdd(t, mp, m)
}
mp.Clear(false)
pending, _ := mp.Pending()
if len(pending) != 10 {
t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
}
for _, m := range pending {
if m.Message.From != a1 {
t.Fatalf("expected message from %s but got one from %s instead", a1, m.Message.From)
}
}
}
func TestUpdates(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ch, err := mp.Updates(ctx)
if err != nil {
t.Fatal(err)
}
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m)
if err != nil {
t.Fatal(err)
}
_, ok := <-ch
if !ok {
t.Fatal("expected update, but got a closed channel instead")
}
}
err = mp.Close()
if err != nil {
t.Fatal(err)
}
_, ok := <-ch
if ok {
t.Fatal("expected closed channel, but got an update instead")
}
}

View File

@ -0,0 +1,81 @@
package messagepool
import (
"context"
"github.com/ipfs/go-cid"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
)
type Provider interface {
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
PutMessage(m types.ChainMsg) (cid.Cid, error)
PubSubPublish(string, []byte) error
GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error)
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error)
}
type mpoolProvider struct {
sm *stmgr.StateManager
ps *pubsub.PubSub
}
func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
return &mpoolProvider{sm: sm, ps: ps}
}
func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
mpp.sm.ChainStore().SubscribeHeadChanges(cb)
return mpp.sm.ChainStore().GetHeaviestTipSet()
}
func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return mpp.sm.ChainStore().PutMessage(m)
}
func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
return mpp.ps.Publish(k, v) //nolint
}
func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
var act types.Actor
stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err)
}
return &act, mpp.sm.WithStateTree(stcid, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
}
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
return mpp.sm.ResolveToKeyAddress(ctx, addr, ts)
}
func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
return mpp.sm.ChainStore().MessagesForBlock(h)
}
func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
return mpp.sm.ChainStore().MessagesForTipset(ts)
}
func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) {
return mpp.sm.ChainStore().LoadTipSet(tsk)
}
func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
baseFee, err := mpp.sm.ChainStore().ComputeBaseFee(ctx, ts)
if err != nil {
return types.NewInt(0), xerrors.Errorf("computing base fee at %s: %w", ts, err)
}
return baseFee, nil
}
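As a rough sketch of how a node could wire the message pool to the live chain through this Provider: the helper name, the datastore.Batching parameter and the dtypes.NetworkName type below are assumptions for illustration and may not match the exact constructor signature outside this diff.

package example

import (
	"github.com/ipfs/go-datastore"
	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// newChainBackedPool is a hypothetical constructor: the Provider adapts the
// StateManager/ChainStore and pubsub to the narrow interface the pool needs,
// so the pool itself never touches chain internals directly.
func newChainBackedPool(sm *stmgr.StateManager, ps *pubsub.PubSub, ds datastore.Batching, name dtypes.NetworkName) (*messagepool.MessagePool, error) {
	provider := messagepool.NewProvider(sm, ps)
	return messagepool.New(provider, ds, name)
}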

163
chain/messagepool/repub.go Normal file
View File

@ -0,0 +1,163 @@
package messagepool
import (
"context"
"sort"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/journal"
"github.com/ipfs/go-cid"
)
const repubMsgLimit = 30
func (mp *MessagePool) republishPendingMessages() error {
mp.curTsLk.Lock()
ts := mp.curTs
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
if err != nil {
mp.curTsLk.Unlock()
return xerrors.Errorf("computing basefee: %w", err)
}
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock()
mp.republished = nil // clear this to avoid races triggering an early republish
for actor := range mp.localAddrs {
mset, ok := mp.pending[actor]
if !ok {
continue
}
if len(mset.msgs) == 0 {
continue
}
// we need to copy this while holding the lock to avoid races with concurrent modification
pend := make(map[uint64]*types.SignedMessage, len(mset.msgs))
for nonce, m := range mset.msgs {
pend[nonce] = m
}
pending[actor] = pend
}
mp.lk.Unlock()
mp.curTsLk.Unlock()
if len(pending) == 0 {
return nil
}
var chains []*msgChain
for actor, mset := range pending {
next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...)
}
if len(chains) == 0 {
return nil
}
sort.Slice(chains, func(i, j int) bool {
return chains[i].Before(chains[j])
})
// we don't republish negative performing chains; this is an error that will be screamed
// at the user
if chains[0].gasPerf < 0 {
return xerrors.Errorf("skipping republish: all message chains have negative gas performance; best gas performance: %f", chains[0].gasPerf)
}
gasLimit := int64(build.BlockGasLimit)
minGas := int64(gasguess.MinGas)
var msgs []*types.SignedMessage
for i := 0; i < len(chains); {
chain := chains[i]
// we can exceed this if we have picked (some) longer chain already
if len(msgs) > repubMsgLimit {
break
}
// there is not enough gas for any message
if gasLimit <= minGas {
break
}
// we don't republish negative performing chains, as they won't be included in
// a block anyway
if chain.gasPerf < 0 {
break
}
// has the chain been invalidated?
if !chain.valid {
i++
continue
}
// does it fit in a block?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
msgs = append(msgs, chain.msgs...)
i++
continue
}
// we can't fit the current chain but there is gas to spare
// trim it and push it down
chain.Trim(gasLimit, mp, baseFee, ts, false)
for j := i; j < len(chains)-1; j++ {
if chains[j].Before(chains[j+1]) {
break
}
chains[j], chains[j+1] = chains[j+1], chains[j]
}
}
count := 0
log.Infof("republishing %d messages", len(msgs))
for _, m := range msgs {
mb, err := m.Serialize()
if err != nil {
return xerrors.Errorf("cannot serialize message: %w", err)
}
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), mb)
if err != nil {
return xerrors.Errorf("cannot publish: %w", err)
}
count++
}
if len(msgs) > 0 {
journal.J.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
msgs := make([]MessagePoolEvt_Message, 0, len(msgs))
for _, m := range msgs {
msgs = append(msgs, MessagePoolEvt_Message{Message: m.Message, CID: m.Cid()})
}
return MessagePoolEvt{
Action: "repub",
Messages: msgs,
}
})
}
// track most recently republished messages
republished := make(map[cid.Cid]struct{})
for _, m := range msgs[:count] {
republished[m.Cid()] = struct{}{}
}
mp.lk.Lock()
// update the republished set so that we can trigger early republish from head changes
mp.republished = republished
mp.lk.Unlock()
return nil
}
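The trim-and-push-down step in the loop above (the inner j loop after chain.Trim) keeps the chain slice ordered after a chain's gas profile changes. A standalone toy sketch of just that step, using a simplified chain type rather than the production msgChain:

package example

type chain struct{ gasPerf float64 }

// Before mirrors the ordering used above: higher gas performance sorts first.
func (c *chain) Before(o *chain) bool { return c.gasPerf > o.gasPerf }

// pushDown restores the sort invariant by swapping the chain at index i
// toward the tail until it is again ordered before its successor.
func pushDown(chains []*chain, i int) {
	for j := i; j < len(chains)-1; j++ {
		if chains[j].Before(chains[j+1]) {
			break
		}
		chains[j], chains[j+1] = chains[j+1], chains[j]
	}
}

Because the trimmed chain only ever gets worse, a single downward pass is enough; a full re-sort of the slice would be wasted work here.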

View File

@ -0,0 +1,66 @@
package messagepool
import (
"testing"
"time"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-datastore"
)
func TestRepubMessages(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest")
if err != nil {
t.Fatal(err)
}
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL
for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m)
if err != nil {
t.Fatal(err)
}
}
if tma.published != 10 {
t.Fatalf("expected to have published 10 messages, but got %d instead", tma.published)
}
mp.repubTrigger <- struct{}{}
time.Sleep(100 * time.Millisecond)
if tma.published != 20 {
t.Fatalf("expected to have published 20 messages, but got %d instead", tma.published)
}
}

View File

@ -14,31 +14,293 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/vm"
abig "github.com/filecoin-project/specs-actors/actors/abi/big" abig "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
) )
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit) var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
const MaxBlocks = 15
type msgChain struct { type msgChain struct {
msgs []*types.SignedMessage msgs []*types.SignedMessage
gasReward *big.Int gasReward *big.Int
gasLimit int64 gasLimit int64
gasPerf float64 gasPerf float64
valid bool effPerf float64
next *msgChain bp float64
parentOffset float64
valid bool
merged bool
next *msgChain
prev *msgChain
} }
func (mp *MessagePool) SelectMessages(ts *types.TipSet) ([]*types.SignedMessage, error) { func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.selectMessages(mp.curTs, ts) // if the ticket quality is high enough that the first block has higher probability
// than any other block, then we don't bother with optimal selection because the
// first block will always have higher effective performance
if tq > 0.84 {
return mp.selectMessagesGreedy(mp.curTs, ts)
}
return mp.selectMessagesOptimal(mp.curTs, ts, tq)
} }
func (mp *MessagePool) selectMessages(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
if err != nil {
return nil, xerrors.Errorf("computing basefee: %w", err)
}
// 0. Load messages from the target tipset; if it is the same as the current tipset in
// the mpool, then this is just the pending messages
pending, err := mp.getPendingMessages(curTs, ts)
if err != nil {
return nil, err
}
if len(pending) == 0 {
return nil, nil
}
// defer only here so if we have no pending messages we don't spam
defer func() {
log.Infow("message selection done", "took", time.Since(start))
}()
// 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas)
result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts)
// have we filled the block?
if gasLimit < minGas {
return result, nil
}
// 1. Create a list of dependent message chains with maximal gas reward per limit consumed
startChains := time.Now()
var chains []*msgChain
for actor, mset := range pending {
next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...)
}
if dt := time.Since(startChains); dt > time.Millisecond {
log.Infow("create message chains done", "took", dt)
}
// 2. Sort the chains
sort.Slice(chains, func(i, j int) bool {
return chains[i].Before(chains[j])
})
if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all messages in mpool have non-positive gas performance", "bestGasPerf", chains[0].gasPerf)
return nil, nil
}
// 3. Partition chains into blocks (without trimming)
// we use the full blockGasLimit (as opposed to the residual gas limit from the
// priority message selection) as we have to account for what other miners are doing
nextChain := 0
partitions := make([][]*msgChain, MaxBlocks)
for i := 0; i < MaxBlocks && nextChain < len(chains); i++ {
gasLimit := int64(build.BlockGasLimit)
for nextChain < len(chains) {
chain := chains[nextChain]
nextChain++
partitions[i] = append(partitions[i], chain)
gasLimit -= chain.gasLimit
if gasLimit < minGas {
break
}
}
}
// 4. Compute effective performance for each chain, based on the partition they fall into
// The effective performance is the gasPerf of the chain * block probability
blockProb := mp.blockProbabilities(tq)
effChains := 0
for i := 0; i < MaxBlocks; i++ {
for _, chain := range partitions[i] {
chain.SetEffectivePerf(blockProb[i])
}
effChains += len(partitions[i])
}
// nullify the effective performance of chains that don't fit in any partition
for _, chain := range chains[effChains:] {
chain.SetNullEffectivePerf()
}
// 5. Resort the chains based on effective performance
sort.Slice(chains, func(i, j int) bool {
return chains[i].BeforeEffective(chains[j])
})
// 6. Merge the head chains to produce the list of messages selected for inclusion
// subject to the residual gas limit
// When a chain is merged in, all its previous dependent chains *must* also be
// merged in or we'll have a broken block
startMerge := time.Now()
last := len(chains)
for i, chain := range chains {
// did we run out of performing chains?
if chain.gasPerf < 0 {
break
}
// has it already been merged?
if chain.merged {
continue
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
}
// does it all fit in the block?
if chainGasLimit <= gasLimit {
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
}
chain.merged = true
// adjust the effective performance for all subsequent chains
if next := chain.next; next != nil && next.effPerf > 0 {
next.effPerf += next.parentOffset
for next = next.next; next != nil && next.effPerf > 0; next = next.next {
next.setEffPerf()
}
}
result = append(result, chain.msgs...)
gasLimit -= chainGasLimit
// resort to account for already merged chains and effective performance adjustments
sort.Slice(chains[i+1:], func(i, j int) bool {
return chains[i].BeforeEffective(chains[j])
})
continue
}
// we can't fit this chain and its dependencies because of block gasLimit -- we are
// at the edge
last = i
break
}
if dt := time.Since(startMerge); dt > time.Millisecond {
log.Infow("merge message chains done", "took", dt)
}
// 7. We have reached the edge of what can fit wholesale; if we still have available
// gasLimit to pack some more chains, then trim the last chain and push it down.
// Trimming invalidates subsequent dependent chains so that they can't be selected
// as their dependency cannot be (fully) included.
// We do this in a loop because the blocking chain might have been inordinately large and
// we might have to do it multiple times to satisfy tail packing.
startTail := time.Now()
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim if necessary
if chains[last].gasLimit > gasLimit {
chains[last].Trim(gasLimit, mp, baseFee, ts, false)
}
// push down if it hasn't been invalidated
if chains[last].valid {
for i := last; i < len(chains)-1; i++ {
if chains[i].BeforeEffective(chains[i+1]) {
break
}
chains[i], chains[i+1] = chains[i+1], chains[i]
}
}
// select the next (valid and fitting) chain and its dependencies for inclusion
for i, chain := range chains[last:] {
// has the chain been invalidated?
if !chain.valid {
continue
}
// has it already been merged?
if chain.merged {
continue
}
// if gasPerf < 0 we have no more profitable chains
if chain.gasPerf < 0 {
break tailLoop
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
depGasLimit := int64(0)
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
depGasLimit += curChain.gasLimit
}
// does it all fit in the block?
if chainGasLimit <= gasLimit {
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
}
chain.merged = true
result = append(result, chain.msgs...)
gasLimit -= chainGasLimit
continue
}
// it doesn't all fit; now we have to take into account the dependent chains before
// making a decision about trimming or invalidating.
// if the dependencies exceed the gas limit, then we must invalidate the chain
// as it can never be included.
// Otherwise we can just trim and continue
if depGasLimit > gasLimit {
chain.Invalidate()
last += i + 1
continue tailLoop
}
// dependencies fit, just trim it
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts, false)
last += i
continue tailLoop
}
// the merge loop ended after processing all the chains and we probably still have
// gas to spare; end the loop.
break
}
if dt := time.Since(startTail); dt > time.Millisecond {
log.Infow("pack tail chains done", "took", dt)
}
return result, nil
}
func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) {
start := time.Now() start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@ -78,7 +340,9 @@ func (mp *MessagePool) selectMessages(curTs, ts *types.TipSet) ([]*types.SignedM
next := mp.createMessageChains(actor, mset, baseFee, ts) next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...) chains = append(chains, next...)
} }
log.Infow("create message chains done", "took", time.Since(startChains)) if dt := time.Since(startChains); dt > time.Millisecond {
log.Infow("create message chains done", "took", dt)
}
// 2. Sort the chains // 2. Sort the chains
sort.Slice(chains, func(i, j int) bool { sort.Slice(chains, func(i, j int) bool {
@ -95,23 +359,25 @@ func (mp *MessagePool) selectMessages(curTs, ts *types.TipSet) ([]*types.SignedM
startMerge := time.Now() startMerge := time.Now()
last := len(chains) last := len(chains)
for i, chain := range chains { for i, chain := range chains {
// does it fit in the block?
if chain.gasLimit <= gasLimit && chain.gasPerf >= 0 {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
continue
}
// did we run out of performing chains? // did we run out of performing chains?
if chain.gasPerf < 0 { if chain.gasPerf < 0 {
break break
} }
// does it fit in the block?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
continue
}
// we can't fit this chain because of block gasLimit -- we are at the edge // we can't fit this chain because of block gasLimit -- we are at the edge
last = i last = i
break break
} }
log.Infow("merge message chains done", "took", time.Since(startMerge)) if dt := time.Since(startMerge); dt > time.Millisecond {
log.Infow("merge message chains done", "took", dt)
}
// 4. We have reached the edge of what we can fit wholesale; if we still have available gasLimit // 4. We have reached the edge of what we can fit wholesale; if we still have available gasLimit
// to pack some more chains, then trim the last chain and push it down. // to pack some more chains, then trim the last chain and push it down.
@ -137,32 +403,35 @@ tailLoop:
// select the next (valid and fitting) chain for inclusion // select the next (valid and fitting) chain for inclusion
for i, chain := range chains[last:] { for i, chain := range chains[last:] {
// has the chain been invalidated // has the chain been invalidated?
if !chain.valid { if !chain.valid {
continue continue
} }
// does it fit in the bock?
if chain.gasLimit <= gasLimit && chain.gasPerf >= 0 {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
continue
}
// if gasPerf < 0 we have no more profitable chains // if gasPerf < 0 we have no more profitable chains
if chain.gasPerf < 0 { if chain.gasPerf < 0 {
break tailLoop break tailLoop
} }
// does it fit in the bock?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
continue
}
// this chain needs to be trimmed // this chain needs to be trimmed
last += i last += i
continue tailLoop continue tailLoop
} }
// the merge loop ended after processing all the chains and we probably still have gas to spare // the merge loop ended after processing all the chains and we probably still have
// -- mark the end. // gas to spare; end the loop
last = len(chains) break
}
if dt := time.Since(startTail); dt > time.Millisecond {
log.Infow("pack tail chains done", "took", dt)
} }
log.Infow("pack tail chains done", "took", time.Since(startTail))
return result, nil return result, nil
} }
@ -170,7 +439,9 @@ tailLoop:
func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
start := time.Now() start := time.Now()
defer func() { defer func() {
log.Infow("select priority messages done", "took", time.Since(start)) if dt := time.Since(start); dt > time.Millisecond {
log.Infow("select priority messages done", "took", dt)
}
}() }()
result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow) result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow)
@ -256,10 +527,9 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
start := time.Now() start := time.Now()
result := make(map[address.Address]map[uint64]*types.SignedMessage) result := make(map[address.Address]map[uint64]*types.SignedMessage)
haveCids := make(map[cid.Cid]struct{})
defer func() { defer func() {
if time.Since(start) > time.Millisecond { if dt := time.Since(start); dt > time.Millisecond {
log.Infow("get pending messages done", "took", time.Since(start)) log.Infow("get pending messages done", "took", dt)
} }
}() }()
@ -282,10 +552,6 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
} }
result[a] = msetCopy result[a] = msetCopy
// mark the messages as seen
for _, m := range mset.msgs {
haveCids[m.Cid()] = struct{}{}
}
} }
} }
@ -294,80 +560,19 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
return result, nil return result, nil
} }
// nope, we need to sync the tipsets if err := mp.runHeadChange(curTs, ts, result); err != nil {
for { return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err)
if curTs.Height() == ts.Height() {
if curTs.Equals(ts) {
return result, nil
}
// different blocks in tipsets -- we mark them as seen so that they are not included in
// in the message set we return, but *neither me (vyzo) nor why understand why*
// this code is also probably completely untested in production, so I am adding a big fat
// warning to revisit this case and sanity check this decision.
log.Warnf("mpool tipset has same height as target tipset but it's not equal; beware of dragons!")
have, err := mp.MessagesForBlocks(ts.Blocks())
if err != nil {
return nil, xerrors.Errorf("error retrieving messages for tipset: %w", err)
}
for _, m := range have {
haveCids[m.Cid()] = struct{}{}
}
}
msgs, err := mp.MessagesForBlocks(ts.Blocks())
if err != nil {
return nil, xerrors.Errorf("error retrieving messages for tipset: %w", err)
}
for _, m := range msgs {
if _, have := haveCids[m.Cid()]; have {
continue
}
haveCids[m.Cid()] = struct{}{}
mset, ok := result[m.Message.From]
if !ok {
mset = make(map[uint64]*types.SignedMessage)
result[m.Message.From] = mset
}
other, dupNonce := mset[m.Message.Nonce]
if dupNonce {
// duplicate nonce, selfishly keep the message with the highest GasPrice
// if the gas prices are the same, keep the one with the highest GasLimit
switch m.Message.GasPremium.Int.Cmp(other.Message.GasPremium.Int) {
case 0:
if m.Message.GasLimit > other.Message.GasLimit {
mset[m.Message.Nonce] = m
}
case 1:
mset[m.Message.Nonce] = m
}
} else {
mset[m.Message.Nonce] = m
}
}
if curTs.Height() >= ts.Height() {
return result, nil
}
ts, err = mp.api.LoadTipSet(ts.Parents())
if err != nil {
return nil, xerrors.Errorf("error loading parent tipset: %w", err)
}
} }
return result, nil
} }
func (mp *MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *big.Int { func (mp *MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *big.Int {
gasReward := abig.Mul(msg.Message.GasPremium, types.NewInt(uint64(msg.Message.GasLimit))) maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
maxReward := types.BigSub(msg.Message.GasFeeCap, baseFee) if types.BigCmp(maxPremium, msg.Message.GasPremium) < 0 {
if types.BigCmp(maxReward, gasReward) < 0 { maxPremium = msg.Message.GasPremium
gasReward = maxReward
} }
gasReward := abig.Mul(maxPremium, types.NewInt(uint64(msg.Message.GasLimit)))
return gasReward.Int return gasReward.Int
} }
@ -399,7 +604,12 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// cannot exceed the block limit; drop all messages that exceed the limit // cannot exceed the block limit; drop all messages that exceed the limit
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed // - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
// the balance // the balance
a, _ := mp.api.StateGetActor(actor, ts) a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err)
return nil
}
curNonce := a.Nonce curNonce := a.Nonce
balance := a.Balance.Int balance := a.Balance.Int
gasLimit := int64(0) gasLimit := int64(0)
@ -449,7 +659,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
} }
// check we have a sane set of messages to construct the chains // check we have a sane set of messages to construct the chains
if i > 0 { if i > skip {
msgs = msgs[skip:i] msgs = msgs[skip:i]
} else { } else {
return nil return nil
@ -533,6 +743,10 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
chains[i].next = chains[i+1] chains[i].next = chains[i+1]
} }
for i := len(chains) - 1; i > 0; i-- {
chains[i].prev = chains[i-1]
}
return chains return chains
} }
@ -549,8 +763,12 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt,
mc.gasLimit -= mc.msgs[i].Message.GasLimit mc.gasLimit -= mc.msgs[i].Message.GasLimit
if mc.gasLimit > 0 { if mc.gasLimit > 0 {
mc.gasPerf = mp.getGasPerf(mc.gasReward, mc.gasLimit) mc.gasPerf = mp.getGasPerf(mc.gasReward, mc.gasLimit)
if mc.bp != 0 {
mc.setEffPerf()
}
} else { } else {
mc.gasPerf = 0 mc.gasPerf = 0
mc.effPerf = 0
} }
i-- i--
} }
@ -563,16 +781,47 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt,
} }
if mc.next != nil { if mc.next != nil {
mc.next.invalidate() mc.next.Invalidate()
mc.next = nil mc.next = nil
} }
} }
func (mc *msgChain) invalidate() { func (mc *msgChain) Invalidate() {
mc.valid = false mc.valid = false
mc.msgs = nil mc.msgs = nil
if mc.next != nil { if mc.next != nil {
mc.next.invalidate() mc.next.Invalidate()
mc.next = nil mc.next = nil
} }
} }
func (mc *msgChain) SetEffectivePerf(bp float64) {
mc.bp = bp
mc.setEffPerf()
}
func (mc *msgChain) setEffPerf() {
effPerf := mc.gasPerf * mc.bp
if effPerf > 0 && mc.prev != nil {
effPerfWithParent := (effPerf*float64(mc.gasLimit) + mc.prev.effPerf*float64(mc.prev.gasLimit)) / float64(mc.gasLimit+mc.prev.gasLimit)
mc.parentOffset = effPerf - effPerfWithParent
effPerf = effPerfWithParent
}
mc.effPerf = effPerf
}
func (mc *msgChain) SetNullEffectivePerf() {
if mc.gasPerf < 0 {
mc.effPerf = mc.gasPerf
} else {
mc.effPerf = 0
}
}
func (mc *msgChain) BeforeEffective(other *msgChain) bool {
// move merged chains to the front so we can discard them earlier
return (mc.merged && !other.merged) || mc.effPerf > other.effPerf ||
(mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) ||
(mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
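A worked toy example of the blending done in setEffPerf above, with made-up numbers: a chain's gasPerf is scaled by its block probability, and if it has a parent the result is gas-limit-weighted with the parent's effective performance (the parentOffset bookkeeping is omitted in this sketch).

package example

import "fmt"

// effPerf mirrors the blending in setEffPerf with plain floats: bp is the
// block probability, parentEff/parentGasLimit describe the parent chain.
func effPerf(gasPerf, bp float64, gasLimit int64, parentEff float64, parentGasLimit int64) float64 {
	ep := gasPerf * bp
	if ep > 0 && parentGasLimit > 0 {
		ep = (ep*float64(gasLimit) + parentEff*float64(parentGasLimit)) / float64(gasLimit+parentGasLimit)
	}
	return ep
}

func main() {
	// child: gasPerf 30 at block probability 0.5 over 1M gas;
	// parent: effective performance 10 over 2M gas -> blended ≈ 11.67
	fmt.Println(effPerf(30, 0.5, 1_000_000, 10, 2_000_000))
}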

View File

@ -2,12 +2,12 @@ package messagepool
import ( import (
"context" "context"
"math"
"math/big"
"math/rand"
"testing" "testing"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-datastore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/messagepool/gasguess"
@ -15,8 +15,14 @@ import (
"github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/chain/wallet"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
_ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp" _ "github.com/filecoin-project/lotus/lib/sigs/secp"
logging "github.com/ipfs/go-log"
) )
func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage { func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage {
@ -60,7 +66,7 @@ func TestMessageChains(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a1, err := w1.GenerateKey(crypto.SigTypeBLS) a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -70,15 +76,15 @@ func TestMessageChains(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a2, err := w2.GenerateKey(crypto.SigTypeBLS) a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
block := mock.MkBlock(nil, 1, 1) block := tma.nextBlock()
ts := mock.TipSet(block) ts := mock.TipSet(block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL tma.setBalance(a1, 1) // in FIL
@ -298,7 +304,7 @@ func TestMessageChainSkipping(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a1, err := w1.GenerateKey(crypto.SigTypeBLS) a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -308,15 +314,15 @@ func TestMessageChainSkipping(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a2, err := w2.GenerateKey(crypto.SigTypeBLS) a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
block := mock.MkBlock(nil, 1, 1) block := tma.nextBlock()
ts := mock.TipSet(block) ts := mock.TipSet(block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
baseFee := types.NewInt(0) baseFee := types.NewInt(0)
tma.setBalance(a1, 1) // in FIL tma.setBalance(a1, 1) // in FIL
@ -368,7 +374,7 @@ func TestBasicMessageSelection(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a1, err := w1.GenerateKey(crypto.SigTypeBLS) a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -378,16 +384,16 @@ func TestBasicMessageSelection(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a2, err := w2.GenerateKey(crypto.SigTypeBLS) a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
block := mock.MkBlock(nil, 1, 1) block := tma.nextBlock()
ts := mock.TipSet(block) ts := mock.TipSet(block)
tma.applyBlock(t, block) tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL tma.setBalance(a2, 1) // in FIL
@ -404,7 +410,7 @@ func TestBasicMessageSelection(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts) msgs, err := mp.SelectMessages(ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -436,12 +442,12 @@ func TestBasicMessageSelection(t *testing.T) {
} }
// now we make a block with all the messages and advance the chain // now we make a block with all the messages and advance the chain
block2 := mock.MkBlock(ts, 2, 2) block2 := tma.nextBlock()
tma.setBlockMessages(block2, msgs...) tma.setBlockMessages(block2, msgs...)
tma.applyBlock(t, block2) tma.applyBlock(t, block2)
// we should have no pending messages in the mpool // we should have no pending messages in the mpool
pend, ts2 := mp.Pending() pend, _ := mp.Pending()
if len(pend) != 0 { if len(pend) != 0 {
t.Fatalf("expected no pending messages, but got %d", len(pend)) t.Fatalf("expected no pending messages, but got %d", len(pend))
} }
@ -454,13 +460,13 @@ func TestBasicMessageSelection(t *testing.T) {
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
msgs = append(msgs, m) msgs = append(msgs, m)
} }
block3 := mock.MkBlock(ts2, 3, 3) block3 := tma.nextBlock()
tma.setBlockMessages(block3, msgs...) tma.setBlockMessages(block3, msgs...)
ts3 := mock.TipSet(block3) ts3 := mock.TipSet(block3)
// now create another set of messages and add them to the mpool // now create another set of messages and add them to the mpool
for i := 20; i < 30; i++ { for i := 20; i < 30; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+200))
mustAdd(t, mp, m) mustAdd(t, mp, m)
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1)) m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
mustAdd(t, mp, m) mustAdd(t, mp, m)
@ -472,16 +478,16 @@ func TestBasicMessageSelection(t *testing.T) {
tma.setStateNonce(a1, 10) tma.setStateNonce(a1, 10)
tma.setStateNonce(a2, 10) tma.setStateNonce(a2, 10)
msgs, err = mp.SelectMessages(ts3) msgs, err = mp.SelectMessages(ts3, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if len(msgs) != 40 { if len(msgs) != 20 {
t.Fatalf("expected 40 messages, got %d", len(msgs)) t.Fatalf("expected 20 messages, got %d", len(msgs))
} }
nextNonce = 10 nextNonce = 20
for i := 0; i < 20; i++ { for i := 0; i < 10; i++ {
if msgs[i].Message.From != a1 { if msgs[i].Message.From != a1 {
t.Fatalf("expected message from actor a1") t.Fatalf("expected message from actor a1")
} }
@ -491,8 +497,8 @@ func TestBasicMessageSelection(t *testing.T) {
nextNonce++ nextNonce++
} }
nextNonce = 10 nextNonce = 20
for i := 20; i < 40; i++ { for i := 10; i < 20; i++ {
if msgs[i].Message.From != a2 { if msgs[i].Message.From != a2 {
t.Fatalf("expected message from actor a2") t.Fatalf("expected message from actor a2")
} }
@ -512,7 +518,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a1, err := w1.GenerateKey(crypto.SigTypeBLS) a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -522,16 +528,16 @@ func TestMessageSelectionTrimming(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a2, err := w2.GenerateKey(crypto.SigTypeBLS) a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
block := mock.MkBlock(nil, 1, 1) block := tma.nextBlock()
ts := mock.TipSet(block) ts := mock.TipSet(block)
tma.applyBlock(t, block) tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL tma.setBalance(a2, 1) // in FIL
@ -546,7 +552,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts) msgs, err := mp.SelectMessages(ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -575,7 +581,7 @@ func TestPriorityMessageSelection(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a1, err := w1.GenerateKey(crypto.SigTypeBLS) a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -585,16 +591,16 @@ func TestPriorityMessageSelection(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
a2, err := w2.GenerateKey(crypto.SigTypeBLS) a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
block := mock.MkBlock(nil, 1, 1) block := tma.nextBlock()
ts := mock.TipSet(block) ts := mock.TipSet(block)
tma.applyBlock(t, block) tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL tma.setBalance(a2, 1) // in FIL
@ -610,7 +616,7 @@ func TestPriorityMessageSelection(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts) msgs, err := mp.SelectMessages(ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -644,3 +650,496 @@ func TestPriorityMessageSelection(t *testing.T) {
nextNonce++ nextNonce++
} }
} }
func TestPriorityMessageSelection2(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
mp.cfg.PriorityAddrs = []address.Address{a1}
nMessages := int(2 * build.BlockGasLimit / gasLimit)
for i := 0; i < nMessages; i++ {
bias := (nMessages - i) / 3
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias))
mustAdd(t, mp, m)
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias))
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(ts, 1.0)
if err != nil {
t.Fatal(err)
}
expectedMsgs := int(build.BlockGasLimit / gasLimit)
if len(msgs) != expectedMsgs {
t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
}
// all messages must be from a1
nextNonce := uint64(0)
for _, m := range msgs {
if m.Message.From != a1 {
t.Fatal("expected messages from a1 before messages from a2")
}
if m.Message.Nonce != nextNonce {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nextNonce++
}
}
func TestOptimalMessageSelection1(t *testing.T) {
// this test uses just a single actor sending messages with a low tq
// the chain-dependent merging algorithm should pick messages from the actor
// from the start
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
nMessages := int(10 * build.BlockGasLimit / gasLimit)
for i := 0; i < nMessages; i++ {
bias := (nMessages - i) / 3
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias))
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(ts, 0.25)
if err != nil {
t.Fatal(err)
}
expectedMsgs := int(build.BlockGasLimit / gasLimit)
if len(msgs) != expectedMsgs {
t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs))
}
nextNonce := uint64(0)
for _, m := range msgs {
if m.Message.From != a1 {
t.Fatal("expected message from a1")
}
if m.Message.Nonce != nextNonce {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nextNonce++
}
}
func TestOptimalMessageSelection2(t *testing.T) {
// this test uses two actors sending messages to each other, with the first
// actor paying (much) higher gas premium than the second.
// We select with a low ticket quality; the chain-dependent merging algorithm should pick
// messages from the second actor from the start
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
nMessages := int(5 * build.BlockGasLimit / gasLimit)
for i := 0; i < nMessages; i++ {
bias := (nMessages - i) / 3
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(200000+i%3+bias))
mustAdd(t, mp, m)
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(190000+i%3+bias))
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(ts, 0.1)
if err != nil {
t.Fatal(err)
}
expectedMsgs := int(build.BlockGasLimit / gasLimit)
if len(msgs) != expectedMsgs {
t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs))
}
var nFrom1, nFrom2 int
var nextNonce1, nextNonce2 uint64
for _, m := range msgs {
if m.Message.From == a1 {
if m.Message.Nonce != nextNonce1 {
t.Fatalf("expected nonce %d but got %d", nextNonce1, m.Message.Nonce)
}
nextNonce1++
nFrom1++
} else {
if m.Message.Nonce != nextNonce2 {
t.Fatalf("expected nonce %d but got %d", nextNonce2, m.Message.Nonce)
}
nextNonce2++
nFrom2++
}
}
if nFrom1 > nFrom2 {
t.Fatalf("expected more messages from a2 than a1; nFrom1=%d nFrom2=%d", nFrom1, nFrom2)
}
}
func TestOptimalMessageSelection3(t *testing.T) {
// this test uses 10 actors sending a block of messages to each other, with the first
// actors paying higher gas premium than the subsequent actors.
// We select with a low ticket quality; the chain-dependent merging algorithm should pick
// messages from the median actor from the start
mp, tma := makeTestMpool()
nActors := 10
// the actors
var actors []address.Address
var wallets []*wallet.Wallet
for i := 0; i < nActors; i++ {
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a, err := w.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
actors = append(actors, a)
wallets = append(wallets, w)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
for _, a := range actors {
tma.setBalance(a, 1) // in FIL
}
nMessages := int(build.BlockGasLimit/gasLimit) + 1
for i := 0; i < nMessages; i++ {
for j := 0; j < nActors; j++ {
premium := 500000 + 10000*(nActors-j) + (nMessages+2-i)/(30*nActors) + i%3
m := makeTestMessage(wallets[j], actors[j], actors[j%nActors], uint64(i), gasLimit, uint64(premium))
mustAdd(t, mp, m)
}
}
msgs, err := mp.SelectMessages(ts, 0.1)
if err != nil {
t.Fatal(err)
}
expectedMsgs := int(build.BlockGasLimit / gasLimit)
if len(msgs) != expectedMsgs {
t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs))
}
whoIs := func(a address.Address) int {
for i, aa := range actors {
if a == aa {
return i
}
}
return -1
}
nonces := make([]uint64, nActors)
for _, m := range msgs {
who := whoIs(m.Message.From)
if who < 3 {
t.Fatalf("got message from %dth actor", who)
}
nextNonce := nonces[who]
if m.Message.Nonce != nextNonce {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nonces[who]++
}
}
func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium func() uint64) (float64, float64, float64) {
// in this test we use 300 actors and send 10 blocks of messages.
// actors send with a randomly distributed premium dictated by the getPremium function.
// a number of miners select with varying ticket quality and we compare the
// capacity and rewards of greedy selection -vs- optimal selection
mp, tma := makeTestMpool()
nActors := 300
// the actors
var actors []address.Address
var wallets []*wallet.Wallet
for i := 0; i < nActors; i++ {
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a, err := w.GenerateKey(crypto.SigTypeSecp256k1)
if err != nil {
t.Fatal(err)
}
actors = append(actors, a)
wallets = append(wallets, w)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
baseFee := types.NewInt(0)
for _, a := range actors {
tma.setBalance(a, 1) // in FIL
}
nMessages := 10 * int(build.BlockGasLimit/gasLimit)
t.Log("nMessages", nMessages)
nonces := make([]uint64, nActors)
for i := 0; i < nMessages; i++ {
from := rng.Intn(nActors)
to := rng.Intn(nActors)
nonce := nonces[from]
nonces[from]++
premium := getPremium()
m := makeTestMessage(wallets[from], actors[from], actors[to], nonce, gasLimit, premium)
mustAdd(t, mp, m)
}
logging.SetLogLevel("messagepool", "error")
// 1. greedy selection
greedyMsgs, err := mp.selectMessagesGreedy(ts, ts)
if err != nil {
t.Fatal(err)
}
totalGreedyCapacity := 0.0
totalGreedyReward := 0.0
totalOptimalCapacity := 0.0
totalOptimalReward := 0.0
totalBestTQReward := 0.0
const runs = 1
for i := 0; i < runs; i++ {
// 2. optimal selection
minersRand := rng.Float64()
winerProba := noWinnersProb()
i := 0
for ; i < MaxBlocks && minersRand > 0; i++ {
minersRand -= winerProba[i]
}
nMiners := i - 1
if nMiners < 1 {
nMiners = 1
}
optMsgs := make(map[cid.Cid]*types.SignedMessage)
bestTq := 0.0
var bestMsgs []*types.SignedMessage
for j := 0; j < nMiners; j++ {
tq := rng.Float64()
msgs, err := mp.SelectMessages(ts, tq)
if err != nil {
t.Fatal(err)
}
if tq > bestTq {
bestMsgs = msgs
}
for _, m := range msgs {
optMsgs[m.Cid()] = m
}
}
totalGreedyCapacity += float64(len(greedyMsgs))
totalOptimalCapacity += float64(len(optMsgs))
boost := float64(len(optMsgs)) / float64(len(greedyMsgs))
t.Logf("nMiners: %d", nMiners)
t.Logf("greedy capacity %d, optimal capacity %d (x %.1f )", len(greedyMsgs),
len(optMsgs), boost)
if len(greedyMsgs) > len(optMsgs) {
t.Errorf("greedy capacity higher than optimal capacity; wtf")
}
greedyReward := big.NewInt(0)
for _, m := range greedyMsgs {
greedyReward.Add(greedyReward, mp.getGasReward(m, baseFee, ts))
}
optReward := big.NewInt(0)
for _, m := range optMsgs {
optReward.Add(optReward, mp.getGasReward(m, baseFee, ts))
}
bestTqReward := big.NewInt(0)
for _, m := range bestMsgs {
bestTqReward.Add(bestTqReward, mp.getGasReward(m, baseFee, ts))
}
totalBestTQReward += float64(bestTqReward.Uint64())
nMinersBig := big.NewInt(int64(nMiners))
greedyAvgReward, _ := new(big.Rat).SetFrac(greedyReward, nMinersBig).Float64()
totalGreedyReward += greedyAvgReward
optimalAvgReward, _ := new(big.Rat).SetFrac(optReward, nMinersBig).Float64()
totalOptimalReward += optimalAvgReward
boost = optimalAvgReward / greedyAvgReward
t.Logf("greedy reward: %.0f, optimal reward: %.0f (x %.1f )", greedyAvgReward,
optimalAvgReward, boost)
}
capacityBoost := totalOptimalCapacity / totalGreedyCapacity
rewardBoost := totalOptimalReward / totalGreedyReward
t.Logf("Average capacity boost: %f", capacityBoost)
t.Logf("Average reward boost: %f", rewardBoost)
t.Logf("Average best tq reward: %f", totalBestTQReward/runs/1e12)
logging.SetLogLevel("messagepool", "info")
return capacityBoost, rewardBoost, totalBestTQReward / runs / 1e12
}
func makeExpPremiumDistribution(rng *rand.Rand) func() uint64 {
return func() uint64 {
premium := 20000*math.Exp(-3.*rng.Float64()) + 5000
return uint64(premium)
}
}
func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 {
zipf := rand.NewZipf(rng, 1.001, 1, 40000)
return func() uint64 {
return zipf.Uint64() + 10000
}
}
func TestCompetitiveMessageSelectionExp(t *testing.T) {
var capacityBoost, rewardBoost, tqReward float64
seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45}
for _, seed := range seeds {
t.Log("running competitive message selection with Exponential premium distribution and seed", seed)
rng := rand.New(rand.NewSource(seed))
cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeExpPremiumDistribution(rng))
capacityBoost += cb
rewardBoost += rb
tqReward += tqR
}
capacityBoost /= float64(len(seeds))
rewardBoost /= float64(len(seeds))
tqReward /= float64(len(seeds))
t.Logf("Average capacity boost across all seeds: %f", capacityBoost)
t.Logf("Average reward boost across all seeds: %f", rewardBoost)
t.Logf("Average reward of best ticket across all seeds: %f", tqReward)
}
func TestCompetitiveMessageSelectionZipf(t *testing.T) {
var capacityBoost, rewardBoost, tqReward float64
seeds := []int64{1947, 1976, 2020, 2100, 10000, 143324, 432432, 131, 32, 45}
for _, seed := range seeds {
t.Log("running competitive message selection with Zipf premium distribution and seed", seed)
rng := rand.New(rand.NewSource(seed))
cb, rb, tqR := testCompetitiveMessageSelection(t, rng, makeZipfPremiumDistribution(rng))
capacityBoost += cb
rewardBoost += rb
tqReward += tqR
}
tqReward /= float64(len(seeds))
capacityBoost /= float64(len(seeds))
rewardBoost /= float64(len(seeds))
t.Logf("Average capacity boost across all seeds: %f", capacityBoost)
t.Logf("Average reward boost across all seeds: %f", rewardBoost)
t.Logf("Average reward of best ticket across all seeds: %f", tqReward)
}

View File

@ -44,7 +44,7 @@ func SendHeadNotifs(nickname string) func(mctx helpers.MetricsCtx, lc fx.Lifecyc
} }
}() }()
go func() { go func() {
sub, err := ps.Subscribe(topic) sub, err := ps.Subscribe(topic) //nolint
if err != nil { if err != nil {
return return
} }
@ -116,6 +116,7 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain
return err return err
} }
//nolint
if err := ps.Publish(topic, b); err != nil { if err := ps.Publish(topic, b); err != nil {
return err return err
} }

View File

@ -170,7 +170,10 @@ func (st *StateTree) LookupID(addr address.Address) (address.Address, error) {
return address.Undef, xerrors.Errorf("loading init actor state: %w", err) return address.Undef, xerrors.Errorf("loading init actor state: %w", err)
} }
a, err := ias.ResolveAddress(&AdtStore{st.Store}, addr) a, found, err := ias.ResolveAddress(&AdtStore{st.Store}, addr)
if err == nil && !found {
err = types.ErrActorNotFound
}
if err != nil { if err != nil {
return address.Undef, xerrors.Errorf("resolve address %s: %w", addr, err) return address.Undef, xerrors.Errorf("resolve address %s: %w", addr, err)
} }
@ -189,7 +192,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
// Transform `addr` to its ID format. // Transform `addr` to its ID format.
iaddr, err := st.LookupID(addr) iaddr, err := st.LookupID(addr)
if err != nil { if err != nil {
if xerrors.Is(err, init_.ErrAddressNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
} }
return nil, xerrors.Errorf("address resolution: %w", err) return nil, xerrors.Errorf("address resolution: %w", err)
@ -224,7 +227,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
iaddr, err := st.LookupID(addr) iaddr, err := st.LookupID(addr)
if err != nil { if err != nil {
if xerrors.Is(err, init_.ErrAddressNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
} }
return xerrors.Errorf("address resolution: %w", err) return xerrors.Errorf("address resolution: %w", err)
@ -243,7 +246,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
} }
func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) { func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "stateTree.Flush") ctx, span := trace.StartSpan(ctx, "stateTree.Flush") //nolint:staticcheck
defer span.End() defer span.End()
if len(st.snaps.layers) != 1 { if len(st.snaps.layers) != 1 {
return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack") return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
@ -265,7 +268,7 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
} }
func (st *StateTree) Snapshot(ctx context.Context) error { func (st *StateTree) Snapshot(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") //nolint:staticcheck
defer span.End() defer span.End()
st.snaps.addLayer() st.snaps.addLayer()
@ -333,3 +336,15 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro
return st.SetActor(addr, act) return st.SetActor(addr, act)
} }
func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error {
var act types.Actor
return st.root.ForEach(&act, func(k string) error {
addr, err := address.NewFromBytes([]byte(k))
if err != nil {
return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err)
}
return f(addr, &act)
})
}
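A hypothetical use of the new ForEach helper added above (the function name is illustrative only), e.g. counting the actors in a state tree:

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/types"
)

// countActors walks the entire state tree via ForEach; the callback is
// invoked once per actor with its address and (decoded) actor record.
func countActors(st *state.StateTree) (int, error) {
	count := 0
	err := st.ForEach(func(addr address.Address, act *types.Actor) error {
		count++
		return nil
	})
	return count, err
}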

View File

@ -23,13 +23,13 @@ func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate
defer span.End() defer span.End()
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: bstate, StateBase: bstate,
Epoch: bheight, Epoch: bheight,
Rand: r, Rand: r,
Bstore: sm.cs.Blockstore(), Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(), Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds, CircSupplyCalc: sm.GetCirculatingSupply,
BaseFee: types.NewInt(0), BaseFee: types.NewInt(0),
} }
vmi, err := vm.NewVM(vmopt) vmi, err := vm.NewVM(vmopt)
@ -124,13 +124,13 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
} }
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: state, StateBase: state,
Epoch: ts.Height() + 1, Epoch: ts.Height() + 1,
Rand: r, Rand: r,
Bstore: sm.cs.Blockstore(), Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(), Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds, CircSupplyCalc: sm.GetCirculatingSupply,
BaseFee: ts.Blocks()[0].ParentBaseFee, BaseFee: ts.Blocks()[0].ParentBaseFee,
} }
vmi, err := vm.NewVM(vmopt) vmi, err := vm.NewVM(vmopt)
if err != nil { if err != nil {
@ -196,7 +196,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C
var outm *types.Message var outm *types.Message
var outr *vm.ApplyRet var outr *vm.ApplyRet
_, _, err := sm.computeTipSetState(ctx, ts.Blocks(), func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { _, _, err := sm.computeTipSetState(ctx, ts, func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error {
if c == mcid { if c == mcid {
outm = m outm = m
outr = ret outr = ret

View File

@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"sync" "sync"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/multisig" "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -42,7 +44,7 @@ type StateManager struct {
stlk sync.Mutex stlk sync.Mutex
genesisMsigLk sync.Mutex genesisMsigLk sync.Mutex
newVM func(*vm.VMOpts) (*vm.VM, error) newVM func(*vm.VMOpts) (*vm.VM, error)
genesisMsigs []multisig.State genInfo *genesisInfo
} }
func NewStateManager(cs *store.ChainStore) *StateManager { func NewStateManager(cs *store.ChainStore) *StateManager {
@ -111,7 +113,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
} }
st, rec, err = sm.computeTipSetState(ctx, ts.Blocks(), nil) st, rec, err = sm.computeTipSetState(ctx, ts, nil)
if err != nil { if err != nil {
return cid.Undef, cid.Undef, err return cid.Undef, cid.Undef, err
} }
@ -121,7 +123,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
var trace []*api.InvocResult var trace []*api.InvocResult
st, _, err := sm.computeTipSetState(ctx, ts.Blocks(), func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { st, _, err := sm.computeTipSetState(ctx, ts, func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error {
ir := &api.InvocResult{ ir := &api.InvocResult{
Msg: msg, Msg: msg,
MsgRct: &ret.MessageReceipt, MsgRct: &ret.MessageReceipt,
@ -141,25 +143,18 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
return st, trace, nil return st, trace, nil
} }
type BlockMessages struct {
Miner address.Address
BlsMessages []types.ChainMsg
SecpkMessages []types.ChainMsg
WinCount int64
}
type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount) (cid.Cid, cid.Cid, error) { func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount) (cid.Cid, cid.Cid, error) {
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: pstate, StateBase: pstate,
Epoch: epoch, Epoch: epoch,
Rand: r, Rand: r,
Bstore: sm.cs.Blockstore(), Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(), Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds, CircSupplyCalc: sm.GetCirculatingSupply,
BaseFee: baseFee, BaseFee: baseFee,
} }
vmi, err := sm.newVM(vmopt) vmi, err := sm.newVM(vmopt)
@ -311,10 +306,12 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
return st, rectroot, nil return st, rectroot, nil
} }
func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.BlockHeader, cb ExecCallback) (cid.Cid, cid.Cid, error) { func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, cb ExecCallback) (cid.Cid, cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "computeTipSetState") ctx, span := trace.StartSpan(ctx, "computeTipSetState")
defer span.End() defer span.End()
blks := ts.Blocks()
for i := 0; i < len(blks); i++ { for i := 0; i < len(blks); i++ {
for j := i + 1; j < len(blks); j++ { for j := i + 1; j < len(blks); j++ {
if blks[i].Miner == blks[j].Miner { if blks[i].Miner == blks[j].Miner {
@ -327,7 +324,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.Bl
var parentEpoch abi.ChainEpoch var parentEpoch abi.ChainEpoch
pstate := blks[0].ParentStateRoot pstate := blks[0].ParentStateRoot
if len(blks[0].Parents) > 0 { if blks[0].Height > 0 {
parent, err := sm.cs.GetBlock(blks[0].Parents[0]) parent, err := sm.cs.GetBlock(blks[0].Parents[0])
if err != nil { if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
@ -343,30 +340,11 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.Bl
r := store.NewChainRand(sm.cs, cids, blks[0].Height) r := store.NewChainRand(sm.cs, cids, blks[0].Height)
var blkmsgs []BlockMessages blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
for _, b := range blks { if err != nil {
bms, sms, err := sm.cs.MessagesForBlock(b) return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to get messages for block: %w", err)
}
bm := BlockMessages{
Miner: b.Miner,
BlsMessages: make([]types.ChainMsg, 0, len(bms)),
SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
WinCount: b.ElectionProof.WinCount,
}
for _, m := range bms {
bm.BlsMessages = append(bm.BlsMessages, m)
}
for _, m := range sms {
bm.SecpkMessages = append(bm.SecpkMessages, m)
}
blkmsgs = append(blkmsgs, bm)
} }
baseFee := blks[0].ParentBaseFee baseFee := blks[0].ParentBaseFee
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee) return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee)
@ -441,7 +419,7 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T
return nil, fmt.Errorf("failed to load message: %w", err) return nil, fmt.Errorf("failed to load message: %w", err)
} }
r, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage()) r, _, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -450,7 +428,7 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T
return r, nil return r, nil
} }
_, r, err = sm.searchBackForMsg(ctx, ts, m) _, r, _, err = sm.searchBackForMsg(ctx, ts, m)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to look back through chain for message: %w", err) return nil, fmt.Errorf("failed to look back through chain for message: %w", err)
} }
@ -461,44 +439,45 @@ func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.T
// WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already // WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already
// happened. It guarantees that the message has been on chain for at least confidence epochs without being reverted // happened. It guarantees that the message has been on chain for at least confidence epochs without being reverted
// before returning. // before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.TipSet, *types.MessageReceipt, error) { func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
msg, err := sm.cs.GetCMessage(mcid) msg, err := sm.cs.GetCMessage(mcid)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to load message: %w", err) return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
} }
tsub := sm.cs.SubHeadChanges(ctx) tsub := sm.cs.SubHeadChanges(ctx)
head, ok := <-tsub head, ok := <-tsub
if !ok { if !ok {
return nil, nil, fmt.Errorf("SubHeadChanges stream was invalid") return nil, nil, cid.Undef, fmt.Errorf("SubHeadChanges stream was invalid")
} }
if len(head) != 1 { if len(head) != 1 {
return nil, nil, fmt.Errorf("SubHeadChanges first entry should have been one item") return nil, nil, cid.Undef, fmt.Errorf("SubHeadChanges first entry should have been one item")
} }
if head[0].Type != store.HCCurrent { if head[0].Type != store.HCCurrent {
return nil, nil, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type)
} }
r, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage()) r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, cid.Undef, err
} }
if r != nil { if r != nil {
return head[0].Val, r, nil return head[0].Val, r, foundMsg, nil
} }
var backTs *types.TipSet var backTs *types.TipSet
var backRcp *types.MessageReceipt var backRcp *types.MessageReceipt
var backFm cid.Cid
backSearchWait := make(chan struct{}) backSearchWait := make(chan struct{})
go func() { go func() {
fts, r, err := sm.searchBackForMsg(ctx, head[0].Val, msg) fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg)
if err != nil { if err != nil {
log.Warnf("failed to look back through chain for message: %w", err) log.Warnf("failed to look back through chain for message: %w", err)
return return
@ -506,11 +485,13 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
backTs = fts backTs = fts
backRcp = r backRcp = r
backFm = foundMsg
close(backSearchWait) close(backSearchWait)
}() }()
var candidateTs *types.TipSet var candidateTs *types.TipSet
var candidateRcp *types.MessageReceipt var candidateRcp *types.MessageReceipt
var candidateFm cid.Cid
heightOfHead := head[0].Val.Height() heightOfHead := head[0].Val.Height()
reverts := map[types.TipSetKey]bool{} reverts := map[types.TipSetKey]bool{}
@ -518,7 +499,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
select { select {
case notif, ok := <-tsub: case notif, ok := <-tsub:
if !ok { if !ok {
return nil, nil, ctx.Err() return nil, nil, cid.Undef, ctx.Err()
} }
for _, val := range notif { for _, val := range notif {
switch val.Type { switch val.Type {
@ -526,24 +507,26 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
if val.Val.Equals(candidateTs) { if val.Val.Equals(candidateTs) {
candidateTs = nil candidateTs = nil
candidateRcp = nil candidateRcp = nil
candidateFm = cid.Undef
} }
if backSearchWait != nil { if backSearchWait != nil {
reverts[val.Val.Key()] = true reverts[val.Val.Key()] = true
} }
case store.HCApply: case store.HCApply:
if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) {
return candidateTs, candidateRcp, nil return candidateTs, candidateRcp, candidateFm, nil
} }
r, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, cid.Undef, err
} }
if r != nil { if r != nil {
if confidence == 0 { if confidence == 0 {
return val.Val, r, err return val.Val, r, foundMsg, err
} }
candidateTs = val.Val candidateTs = val.Val
candidateRcp = r candidateRcp = r
candidateFm = foundMsg
} }
heightOfHead = val.Val.Height() heightOfHead = val.Val.Height()
} }
@ -553,111 +536,112 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
if backTs != nil && !reverts[backTs.Key()] { if backTs != nil && !reverts[backTs.Key()] {
// if head is at or past confidence interval, return immediately // if head is at or past confidence interval, return immediately
if heightOfHead >= backTs.Height()+abi.ChainEpoch(confidence) { if heightOfHead >= backTs.Height()+abi.ChainEpoch(confidence) {
return backTs, backRcp, nil return backTs, backRcp, backFm, nil
} }
// wait for confidence interval // wait for confidence interval
candidateTs = backTs candidateTs = backTs
candidateRcp = backRcp candidateRcp = backRcp
candidateFm = backFm
} }
reverts = nil reverts = nil
backSearchWait = nil backSearchWait = nil
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, ctx.Err() return nil, nil, cid.Undef, ctx.Err()
} }
} }
} }
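To illustrate the new four-value signature of WaitForMessage, here is a hypothetical caller sketch. It is not part of this diff; the helper name awaitMsg, the confidence value of 5 and the imports (context, fmt, github.com/ipfs/go-cid, lotus' chain/stmgr) are assumptions for illustration only.

// awaitMsg is a hypothetical helper showing how the extra cid.Cid return value can be used.
func awaitMsg(ctx context.Context, sm *stmgr.StateManager, mcid cid.Cid) error {
	ts, rcpt, found, err := sm.WaitForMessage(ctx, mcid, 5) // wait for 5 epochs of confidence
	if err != nil {
		return err
	}
	if rcpt == nil {
		return fmt.Errorf("no receipt found for %s", mcid)
	}
	if found != mcid {
		// an equivalent message (same sender, nonce and call) executed under a different CID
		fmt.Printf("message %s was executed as %s\n", mcid, found)
	}
	fmt.Printf("executed in tipset %s with exit code %d\n", ts.Key(), rcpt.ExitCode)
	return nil
}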
func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, error) { func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
msg, err := sm.cs.GetCMessage(mcid) msg, err := sm.cs.GetCMessage(mcid)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to load message: %w", err) return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err)
} }
head := sm.cs.GetHeaviestTipSet() head := sm.cs.GetHeaviestTipSet()
r, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage()) r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, cid.Undef, err
} }
if r != nil { if r != nil {
return head, r, nil return head, r, foundMsg, nil
} }
fts, r, err := sm.searchBackForMsg(ctx, head, msg) fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg)
if err != nil { if err != nil {
log.Warnf("failed to look back through chain for message %s", mcid) log.Warnf("failed to look back through chain for message %s", mcid)
return nil, nil, err return nil, nil, cid.Undef, err
} }
if fts == nil { if fts == nil {
return nil, nil, nil return nil, nil, cid.Undef, nil
} }
return fts, r, nil return fts, r, foundMsg, nil
} }
func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, error) { func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
cur := from cur := from
for { for {
if cur.Height() == 0 { if cur.Height() == 0 {
// it ain't here! // it ain't here!
return nil, nil, nil return nil, nil, cid.Undef, nil
} }
select { select {
case <-ctx.Done(): case <-ctx.Done():
return nil, nil, nil return nil, nil, cid.Undef, nil
default: default:
} }
var act types.Actor var act types.Actor
err := sm.WithParentState(cur, sm.WithActor(m.VMMessage().From, GetActor(&act))) err := sm.WithParentState(cur, sm.WithActor(m.VMMessage().From, GetActor(&act)))
if err != nil { if err != nil {
return nil, nil, err return nil, nil, cid.Undef, err
} }
// we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for, // we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for,
// either way, no reason to lookback, it ain't there // either way, no reason to lookback, it ain't there
if act.Nonce == 0 || act.Nonce < m.VMMessage().Nonce { if act.Nonce == 0 || act.Nonce < m.VMMessage().Nonce {
return nil, nil, nil return nil, nil, cid.Undef, nil
} }
ts, err := sm.cs.LoadTipSet(cur.Parents()) ts, err := sm.cs.LoadTipSet(cur.Parents())
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err) return nil, nil, cid.Undef, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err)
} }
r, err := sm.tipsetExecutedMessage(ts, m.Cid(), m.VMMessage()) r, foundMsg, err := sm.tipsetExecutedMessage(ts, m.Cid(), m.VMMessage())
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("checking for message execution during lookback: %w", err) return nil, nil, cid.Undef, fmt.Errorf("checking for message execution during lookback: %w", err)
} }
if r != nil { if r != nil {
return ts, r, nil return ts, r, foundMsg, nil
} }
cur = ts cur = ts
} }
} }
func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, error) { func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, cid.Cid, error) {
// The genesis block did not execute any messages // The genesis block did not execute any messages
if ts.Height() == 0 { if ts.Height() == 0 {
return nil, nil return nil, cid.Undef, nil
} }
pts, err := sm.cs.LoadTipSet(ts.Parents()) pts, err := sm.cs.LoadTipSet(ts.Parents())
if err != nil { if err != nil {
return nil, err return nil, cid.Undef, err
} }
cm, err := sm.cs.MessagesForTipset(pts) cm, err := sm.cs.MessagesForTipset(pts)
if err != nil { if err != nil {
return nil, err return nil, cid.Undef, err
} }
for ii := range cm { for ii := range cm {
@ -667,21 +651,30 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm
if m.VMMessage().From == vmm.From { // cheaper to just check origin first if m.VMMessage().From == vmm.From { // cheaper to just check origin first
if m.VMMessage().Nonce == vmm.Nonce { if m.VMMessage().Nonce == vmm.Nonce {
if m.Cid() == msg { if m.VMMessage().EqualCall(vmm) {
return sm.cs.GetParentReceipt(ts.Blocks()[0], i) if m.Cid() != msg {
log.Warnw("found message with equal nonce and call params but different CID",
"wanted", msg, "found", m.Cid(), "nonce", vmm.Nonce, "from", vmm.From)
}
pr, err := sm.cs.GetParentReceipt(ts.Blocks()[0], i)
if err != nil {
return nil, cid.Undef, err
}
return pr, m.Cid(), nil
} }
// this should be that message // this should be that message
return nil, xerrors.Errorf("found message with equal nonce as the one we are looking for (F:%s n %d, TS: %s n%d)", return nil, cid.Undef, xerrors.Errorf("found message with equal nonce as the one we are looking for (F:%s n %d, TS: %s n%d)",
msg, vmm.Nonce, m.Cid(), m.VMMessage().Nonce) msg, vmm.Nonce, m.Cid(), m.VMMessage().Nonce)
} }
if m.VMMessage().Nonce < vmm.Nonce { if m.VMMessage().Nonce < vmm.Nonce {
return nil, nil // don't bother looking further return nil, cid.Undef, nil // don't bother looking further
} }
} }
} }
return nil, nil return nil, cid.Undef, nil
} }
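The matching change above (EqualCall instead of an exact CID comparison) is what the new cid.Cid return value is for: a message can execute on chain under a CID different from the one a caller is tracking, for example a resend that differs only in fields EqualCall does not compare. The receipt is therefore matched on sender, nonce and call, the CID mismatch is logged, and the CID that actually executed is passed back up to SearchForMessage and WaitForMessage callers.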
func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]address.Address, error) { func (sm *StateManager) ListAllActors(ctx context.Context, ts *types.TipSet) ([]address.Address, error) {
@ -782,12 +775,24 @@ func (sm *StateManager) SetVMConstructor(nvm func(*vm.VMOpts) (*vm.VM, error)) {
sm.newVM = nvm sm.newVM = nvm
} }
type GenesisMsigEntry struct { type genesisInfo struct {
totalFunds abi.TokenAmount genesisMsigs []multisig.State
unitVest abi.TokenAmount // info about the Accounts in the genesis state
genesisActors []genesisActor
genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
} }
func (sm *StateManager) setupGenesisMsigs(ctx context.Context) error { type genesisActor struct {
addr address.Address
initBal abi.TokenAmount
}
// sets up information about the actors in the genesis state
func (sm *StateManager) setupGenesisActors(ctx context.Context) error {
gi := genesisInfo{}
gb, err := sm.cs.GetGenesis() gb, err := sm.cs.GetGenesis()
if err != nil { if err != nil {
return xerrors.Errorf("getting genesis block: %w", err) return xerrors.Errorf("getting genesis block: %w", err)
@ -803,6 +808,22 @@ func (sm *StateManager) setupGenesisMsigs(ctx context.Context) error {
return xerrors.Errorf("getting genesis tipset state: %w", err) return xerrors.Errorf("getting genesis tipset state: %w", err)
} }
cst := cbor.NewCborStore(sm.cs.Blockstore())
sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}
gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}
gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
r, err := adt.AsMap(sm.cs.Store(ctx), st) r, err := adt.AsMap(sm.cs.Store(ctx), st)
if err != nil { if err != nil {
return xerrors.Errorf("getting genesis actors: %w", err) return xerrors.Errorf("getting genesis actors: %w", err)
@ -829,40 +850,273 @@ func (sm *StateManager) setupGenesisMsigs(ctx context.Context) error {
totalsByEpoch[s.UnlockDuration] = s.InitialBalance totalsByEpoch[s.UnlockDuration] = s.InitialBalance
} }
} else if act.Code == builtin.AccountActorCodeID {
// should exclude burnt funds actor and "remainder account actor"
// should only ever be "faucet" accounts in testnets
kaddr, err := address.NewFromBytes([]byte(k))
if err != nil {
return xerrors.Errorf("decoding address: %w", err)
}
if kaddr != builtin.BurntFundsActorAddr {
kid, err := sTree.LookupID(kaddr)
if err != nil {
return xerrors.Errorf("resolving address: %w", err)
}
gi.genesisActors = append(gi.genesisActors, genesisActor{
addr: kid,
initBal: act.Balance,
})
}
} }
return nil return nil
}) })
if err != nil { if err != nil {
return xerrors.Errorf("error setting up composite genesis multisigs: %w", err) return xerrors.Errorf("error setting up genesis infos: %w", err)
} }
sm.genesisMsigs = make([]multisig.State, 0, len(totalsByEpoch)) gi.genesisMsigs = make([]multisig.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch { for k, v := range totalsByEpoch {
ns := multisig.State{ ns := multisig.State{
InitialBalance: v, InitialBalance: v,
UnlockDuration: k, UnlockDuration: k,
PendingTxns: cid.Undef, PendingTxns: cid.Undef,
} }
sm.genesisMsigs = append(sm.genesisMsigs, ns) gi.genesisMsigs = append(gi.genesisMsigs, ns)
} }
sm.genInfo = &gi
return nil return nil
} }
func (sm *StateManager) GetVestedFunds(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { // sets up information about the actors in the genesis state
sm.genesisMsigLk.Lock() // For testnet we use a hardcoded set of multisig states, instead of what's actually in the genesis multisigs
defer sm.genesisMsigLk.Unlock() // We also do not consider ANY account actors (including the faucet)
if sm.genesisMsigs == nil { func (sm *StateManager) setupGenesisActorsTestnet(ctx context.Context) error {
err := sm.setupGenesisMsigs(ctx)
if err != nil { gi := genesisInfo{}
return big.Zero(), xerrors.Errorf("failed to setup genesis msig entries: %w", err)
} gb, err := sm.cs.GetGenesis()
if err != nil {
return xerrors.Errorf("getting genesis block: %w", err)
} }
gts, err := types.NewTipSet([]*types.BlockHeader{gb})
if err != nil {
return xerrors.Errorf("getting genesis tipset: %w", err)
}
st, _, err := sm.TipSetState(ctx, gts)
if err != nil {
return xerrors.Errorf("getting genesis tipset state: %w", err)
}
cst := cbor.NewCborStore(sm.cs.Blockstore())
sTree, err := state.LoadStateTree(cst, st)
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}
gi.genesisMarketFunds, err = getFilMarketLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis market funds: %w", err)
}
gi.genesisPledge, err = getFilPowerLocked(ctx, sTree)
if err != nil {
return xerrors.Errorf("setting up genesis pledge: %w", err)
}
totalsByEpoch := make(map[abi.ChainEpoch]abi.TokenAmount)
// 6 months
sixMonths := abi.ChainEpoch(183 * builtin.EpochsInDay)
totalsByEpoch[sixMonths] = big.NewInt(49_929_341)
totalsByEpoch[sixMonths] = big.Add(totalsByEpoch[sixMonths], big.NewInt(32_787_700))
// 1 year
oneYear := abi.ChainEpoch(365 * builtin.EpochsInDay)
totalsByEpoch[oneYear] = big.NewInt(22_421_712)
// 2 years
twoYears := abi.ChainEpoch(2 * 365 * builtin.EpochsInDay)
totalsByEpoch[twoYears] = big.NewInt(7_223_364)
// 3 years
threeYears := abi.ChainEpoch(3 * 365 * builtin.EpochsInDay)
totalsByEpoch[threeYears] = big.NewInt(87_637_883)
// 6 years
sixYears := abi.ChainEpoch(6 * 365 * builtin.EpochsInDay)
totalsByEpoch[sixYears] = big.NewInt(100_000_000)
totalsByEpoch[sixYears] = big.Add(totalsByEpoch[sixYears], big.NewInt(300_000_000))
gi.genesisMsigs = make([]multisig.State, 0, len(totalsByEpoch))
for k, v := range totalsByEpoch {
ns := multisig.State{
InitialBalance: v,
UnlockDuration: k,
PendingTxns: cid.Undef,
}
gi.genesisMsigs = append(gi.genesisMsigs, ns)
}
sm.genInfo = &gi
return nil
}
// GetFilVested returns all funds that have "left" actors that are in the genesis state: // GetFilVested returns all funds that have "left" actors that are in the genesis state:
// - For Multisigs, it counts the actual amounts that have vested at the given epoch // - For Multisigs, it counts the actual amounts that have vested at the given epoch
// - For Accounts, it counts max(genesisBalance - currentBalance, 0). // - For Accounts, it counts max(genesisBalance - currentBalance, 0).
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
vf := big.Zero() vf := big.Zero()
for _, v := range sm.genesisMsigs { for _, v := range sm.genInfo.genesisMsigs {
au := big.Sub(v.InitialBalance, v.AmountLocked(height)) au := big.Sub(v.InitialBalance, v.AmountLocked(height))
vf = big.Add(vf, au) vf = big.Add(vf, au)
} }
// there should not be any such accounts in testnet (and also none in mainnet?)
for _, v := range sm.genInfo.genesisActors {
act, err := st.GetActor(v.addr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get actor: %w", err)
}
diff := big.Sub(v.initBal, act.Balance)
if diff.GreaterThan(big.Zero()) {
vf = big.Add(vf, diff)
}
}
vf = big.Add(vf, sm.genInfo.genesisPledge)
vf = big.Add(vf, sm.genInfo.genesisMarketFunds)
return vf, nil return vf, nil
} }
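Written out, the amount returned above for epoch h is simply a restatement of the code, not new behaviour:

    FilVested(h) = sum over genesis multisigs of (InitialBalance - AmountLocked(h))
                 + sum over tracked genesis accounts of max(genesisBalance - currentBalance, 0)
                 + genesisPledge + genesisMarketFunds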
func GetFilMined(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
ractor, err := st.GetActor(builtin.RewardActorAddr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load reward actor state: %w", err)
}
var rst reward.State
if err := st.Store.Get(ctx, ractor.Head, &rst); err != nil {
return big.Zero(), xerrors.Errorf("failed to load reward state: %w", err)
}
return rst.TotalMined, nil
}
func getFilMarketLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
mactor, err := st.GetActor(builtin.StorageMarketActorAddr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load market actor: %w", err)
}
var mst market.State
if err := st.Store.Get(ctx, mactor.Head, &mst); err != nil {
return big.Zero(), xerrors.Errorf("failed to load market state: %w", err)
}
fml := types.BigAdd(mst.TotalClientLockedCollateral, mst.TotalProviderLockedCollateral)
fml = types.BigAdd(fml, mst.TotalClientStorageFee)
return fml, nil
}
func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
pactor, err := st.GetActor(builtin.StoragePowerActorAddr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load power actor: %w", err)
}
var pst power.State
if err := st.Store.Get(ctx, pactor.Head, &pst); err != nil {
return big.Zero(), xerrors.Errorf("failed to load power state: %w", err)
}
return pst.TotalPledgeCollateral, nil
}
func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
filMarketLocked, err := getFilMarketLocked(ctx, st)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get filMarketLocked: %w", err)
}
filPowerLocked, err := getFilPowerLocked(ctx, st)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to get filPowerLocked: %w", err)
}
return types.BigAdd(filMarketLocked, filPowerLocked), nil
}
func GetFilBurnt(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
burnt, err := st.GetActor(builtin.BurntFundsActorAddr)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to load burnt actor: %w", err)
}
return burnt.Balance, nil
}
func (sm *StateManager) GetCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
if sm.genInfo == nil {
err := sm.setupGenesisActorsTestnet(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup genesis information: %w", err)
}
}
filVested, err := sm.GetFilVested(ctx, height, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err)
}
filMined, err := GetFilMined(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filMined: %w", err)
}
filBurnt, err := GetFilBurnt(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err)
}
filLocked, err := sm.GetFilLocked(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err)
}
ret := types.BigAdd(filVested, filMined)
ret = types.BigSub(ret, filBurnt)
ret = types.BigSub(ret, filLocked)
if ret.LessThan(big.Zero()) {
ret = big.Zero()
}
return api.CirculatingSupply{
FilVested: filVested,
FilMined: filMined,
FilBurnt: filBurnt,
FilLocked: filLocked,
FilCirculating: ret,
}, nil
}
func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
csi, err := sm.GetCirculatingSupplyDetailed(ctx, height, st)
if err != nil {
return big.Zero(), err
}
return csi.FilCirculating, nil
}
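Combining the helpers above, GetCirculatingSupplyDetailed boils down to one clamped expression (again just a summary of the code):

    FilCirculating = max(FilVested + FilMined - FilBurnt - FilLocked, 0)

with FilVested coming from the genesis multisigs, tracked genesis accounts, genesis pledge and genesis market funds, FilMined from the reward actor's TotalMined, FilBurnt from the burnt-funds actor balance, and FilLocked from the market's locked collateral and client storage fees plus the power actor's TotalPledgeCollateral.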

View File

@ -13,9 +13,8 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/account" "github.com/filecoin-project/specs-actors/actors/builtin/account"
"github.com/filecoin-project/specs-actors/actors/builtin/cron" "github.com/filecoin-project/specs-actors/actors/builtin/cron"
@ -175,7 +174,7 @@ func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet,
} }
func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) { func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) {
var partsProving []*abi.BitField var partsProving []abi.BitField
var mas *miner.State var mas *miner.State
var info *miner.MinerInfo var info *miner.MinerInfo
@ -294,14 +293,8 @@ func StateMinerInfo(ctx context.Context, sm *StateManager, ts *types.TipSet, mad
} }
func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) { func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) {
var mas miner.State
_, err := sm.LoadActorState(ctx, maddr, &mas, ts)
if err != nil {
return false, xerrors.Errorf("(get miner slashed) failed to load miner actor state")
}
var spas power.State var spas power.State
_, err = sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spas, ts) _, err := sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spas, ts)
if err != nil { if err != nil {
return false, xerrors.Errorf("(get miner slashed) failed to load power actor state") return false, xerrors.Errorf("(get miner slashed) failed to load power actor state")
} }
@ -441,13 +434,13 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
r := store.NewChainRand(sm.cs, ts.Cids(), height) r := store.NewChainRand(sm.cs, ts.Cids(), height)
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: base, StateBase: base,
Epoch: height, Epoch: height,
Rand: r, Rand: r,
Bstore: sm.cs.Blockstore(), Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(), Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds, CircSupplyCalc: sm.GetCirculatingSupply,
BaseFee: ts.Blocks()[0].ParentBaseFee, BaseFee: ts.Blocks()[0].ParentBaseFee,
} }
vmi, err := vm.NewVM(vmopt) vmi, err := vm.NewVM(vmopt)
if err != nil { if err != nil {
@ -593,40 +586,6 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBe
}, nil }, nil
} }
func (sm *StateManager) CirculatingSupply(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) {
if ts == nil {
ts = sm.cs.GetHeaviestTipSet()
}
st, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return big.Zero(), err
}
r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
vmopt := &vm.VMOpts{
StateBase: st,
Epoch: ts.Height(),
Rand: r,
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds,
BaseFee: ts.Blocks()[0].ParentBaseFee,
}
vmi, err := vm.NewVM(vmopt)
if err != nil {
return big.Zero(), err
}
unsafeVM := &vm.UnsafeVM{VM: vmi}
rt := unsafeVM.MakeRuntime(ctx, &types.Message{
GasLimit: 100e6,
From: builtin.SystemActorAddr,
}, builtin.SystemActorAddr, 0, 0, 0)
return rt.TotalFilCircSupply(), nil
}
type methodMeta struct { type methodMeta struct {
Name string Name string
@ -694,29 +653,20 @@ func MinerHasMinPower(ctx context.Context, sm *StateManager, addr address.Addres
return ps.MinerNominalPowerMeetsConsensusMinimum(sm.ChainStore().Store(ctx), addr) return ps.MinerNominalPowerMeetsConsensusMinimum(sm.ChainStore().Store(ctx), addr)
} }
func GetCirculatingSupply(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) { func CheckTotalFIL(ctx context.Context, sm *StateManager, ts *types.TipSet) (abi.TokenAmount, error) {
if ts == nil { str, err := state.LoadStateTree(sm.ChainStore().Store(ctx), ts.ParentState())
ts = sm.cs.GetHeaviestTipSet()
}
r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
vmopt := &vm.VMOpts{
StateBase: ts.ParentState(),
Epoch: ts.Height(),
Rand: r,
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),
VestedCalc: sm.GetVestedFunds,
BaseFee: ts.Blocks()[0].ParentBaseFee,
}
vmi, err := vm.NewVM(vmopt)
if err != nil { if err != nil {
return abi.NewTokenAmount(0), err return abi.TokenAmount{}, err
} }
uvm := &vm.UnsafeVM{vmi} sum := types.NewInt(0)
err = str.ForEach(func(a address.Address, act *types.Actor) error {
sum = types.BigAdd(sum, act.Balance)
return nil
})
if err != nil {
return abi.TokenAmount{}, err
}
rt := uvm.MakeRuntime(ctx, &types.Message{From: builtin.InitActorAddr, GasLimit: 10000000}, builtin.InitActorAddr, 0, 0, 0) return sum, nil
return rt.TotalFilCircSupply(), nil
} }

View File

@ -7,11 +7,26 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int) types.BigInt { func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int) types.BigInt {
delta := gasLimitUsed/int64(noOfBlocks) - build.BlockGasTarget // delta := 1/PackingEfficiency * gasLimitUsed/noOfBlocks - build.BlockGasTarget
// change := baseFee * delta / BlockGasTarget / BaseFeeMaxChangeDenom
// nextBaseFee = baseFee + change
// nextBaseFee = max(nextBaseFee, build.MinimumBaseFee)
delta := build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum)
delta -= build.BlockGasTarget
// cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta
if delta > build.BlockGasTarget {
delta = build.BlockGasTarget
}
if delta < -build.BlockGasTarget {
delta = -build.BlockGasTarget
}
change := big.Mul(baseFee, big.NewInt(delta)) change := big.Mul(baseFee, big.NewInt(delta))
change = big.Div(change, big.NewInt(build.BlockGasTarget)) change = big.Div(change, big.NewInt(build.BlockGasTarget))
@ -26,17 +41,30 @@ func computeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int
func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) { func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi.TokenAmount, error) {
zero := abi.NewTokenAmount(0) zero := abi.NewTokenAmount(0)
// totalLimit is sum of GasLimits of unique messages in a tipset
totalLimit := int64(0) totalLimit := int64(0)
seen := make(map[cid.Cid]struct{})
for _, b := range ts.Blocks() { for _, b := range ts.Blocks() {
msg1, msg2, err := cs.MessagesForBlock(b) msg1, msg2, err := cs.MessagesForBlock(b)
if err != nil { if err != nil {
return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err) return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err)
} }
for _, m := range msg1 { for _, m := range msg1 {
totalLimit += m.GasLimit c := m.Cid()
if _, ok := seen[c]; !ok {
totalLimit += m.GasLimit
seen[c] = struct{}{}
}
} }
for _, m := range msg2 { for _, m := range msg2 {
totalLimit += m.Message.GasLimit c := m.Cid()
if _, ok := seen[c]; !ok {
totalLimit += m.Message.GasLimit
seen[c] = struct{}{}
}
} }
} }
parentBaseFee := ts.Blocks()[0].ParentBaseFee parentBaseFee := ts.Blocks()[0].ParentBaseFee
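As a self-contained illustration of the rule encoded above, the sketch below re-derives the update with plain int64 arithmetic. The constant values are assumptions standing in for the build-package parameters of this era (BlockGasTarget = BlockGasLimit/2 = 5e9, PackingEfficiency = 4/5, BaseFeeMaxChangeDenom = 8, MinimumBaseFee = 100) and are not authoritative.

package main

import "fmt"

const (
	blockGasTarget         = 5_000_000_000 // assumed: build.BlockGasLimit / 2
	packingEfficiencyNum   = 4             // assumed: build.PackingEfficiencyNum
	packingEfficiencyDenom = 5             // assumed: build.PackingEfficiencyDenom
	baseFeeMaxChangeDenom  = 8             // assumed: 12.5% max change per epoch
	minimumBaseFee         = 100           // assumed floor
)

// nextBaseFee mirrors the shape of computeNextBaseFee using int64 instead of big ints.
func nextBaseFee(baseFee, gasLimitUsed, noOfBlocks int64) int64 {
	delta := packingEfficiencyDenom*gasLimitUsed/(noOfBlocks*packingEfficiencyNum) - blockGasTarget
	if delta > blockGasTarget {
		delta = blockGasTarget
	}
	if delta < -blockGasTarget {
		delta = -blockGasTarget
	}
	change := baseFee * delta / blockGasTarget / baseFeeMaxChangeDenom
	next := baseFee + change
	if next < minimumBaseFee {
		next = minimumBaseFee
	}
	return next
}

func main() {
	// a single block exactly at the gas target now raises the base fee by 1/32 (+3.125%),
	// which is why the test expectations in the next file move from 100e6 to 103.125e6
	fmt.Println(nextBaseFee(100e6, blockGasTarget, 1)) // 103125000
	// two blocks using twice the block gas limit hit the +12.5% cap
	fmt.Println(nextBaseFee(100e6, 4*blockGasTarget, 2)) // 112500000
}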

View File

@ -18,10 +18,10 @@ func TestBaseFee(t *testing.T) {
}{ }{
{100e6, 0, 1, 87.5e6}, {100e6, 0, 1, 87.5e6},
{100e6, 0, 5, 87.5e6}, {100e6, 0, 5, 87.5e6},
{100e6, build.BlockGasTarget, 1, 100e6}, {100e6, build.BlockGasTarget, 1, 103.125e6},
{100e6, build.BlockGasTarget * 2, 2, 100e6}, {100e6, build.BlockGasTarget * 2, 2, 103.125e6},
{100e6, build.BlockGasLimit * 2, 2, 112.5e6}, {100e6, build.BlockGasLimit * 2, 2, 112.5e6},
{100e6, build.BlockGasLimit * 1.5, 2, 106.25e6}, {100e6, build.BlockGasLimit * 1.5, 2, 110937500},
} }
for _, test := range tests { for _, test := range tests {

View File

@ -2,6 +2,8 @@ package store
import ( import (
"context" "context"
"os"
"strconv"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
@ -9,6 +11,19 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
var DefaultChainIndexCacheSize = 32 << 10
func init() {
if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" {
lcic, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_INDEX_CACHE' env var: %s", err)
}
DefaultChainIndexCacheSize = lcic
}
}
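As a hypothetical example of this override (the value is illustrative, not a recommendation), starting the daemon with LOTUS_CHAIN_INDEX_CACHE=65536 in the environment would double the skip cache, trading memory for fewer tipset loads on deep lookbacks; DefaultTipSetCacheSize and the sync message-fetch window later in this diff get the same kind of environment override.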
type ChainIndex struct { type ChainIndex struct {
skipCache *lru.ARCCache skipCache *lru.ARCCache
@ -19,7 +34,7 @@ type ChainIndex struct {
type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error)
func NewChainIndex(lts loadTipSetFunc) *ChainIndex { func NewChainIndex(lts loadTipSetFunc) *ChainIndex {
sc, _ := lru.NewARC(8192) sc, _ := lru.NewARC(DefaultChainIndexCacheSize)
return &ChainIndex{ return &ChainIndex{
skipCache: sc, skipCache: sc,
loadTipSet: lts, loadTipSet: lts,

View File

@ -43,7 +43,7 @@ func TestIndexSeeks(t *testing.T) {
if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
cs.SetGenesis(gen) assert.NoError(t, cs.SetGenesis(gen))
// Put 113 blocks from genesis // Put 113 blocks from genesis
for i := 0; i < 113; i++ { for i := 0; i < 113; i++ {

View File

@ -7,6 +7,7 @@ import (
"encoding/json" "encoding/json"
"io" "io"
"os" "os"
"strconv"
"sync" "sync"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
@ -47,6 +48,18 @@ var log = logging.Logger("chainstore")
var chainHeadKey = dstore.NewKey("head") var chainHeadKey = dstore.NewKey("head")
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
var DefaultTipSetCacheSize = 8192
func init() {
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
tscs, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_CHAIN_TIPSET_CACHE' env var: %s", err)
}
DefaultTipSetCacheSize = tscs
}
}
// ReorgNotifee represents a callback that gets called upon reorgs. // ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error type ReorgNotifee func(rev, app []*types.TipSet) error
@ -101,7 +114,7 @@ type ChainStore struct {
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore { func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
c, _ := lru.NewARC(2048) c, _ := lru.NewARC(2048)
tsc, _ := lru.NewARC(4096) tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
cs := &ChainStore{ cs := &ChainStore{
bs: bs, bs: bs,
ds: ds, ds: ds,
@ -493,6 +506,10 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet,
} }
func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
return ReorgOps(cs.LoadTipSet, a, b)
}
func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
left := a left := a
right := b right := b
@ -500,7 +517,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
for !left.Equals(right) { for !left.Equals(right) {
if left.Height() > right.Height() { if left.Height() > right.Height() {
leftChain = append(leftChain, left) leftChain = append(leftChain, left)
par, err := cs.LoadTipSet(left.Parents()) par, err := lts(left.Parents())
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -508,7 +525,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
left = par left = par
} else { } else {
rightChain = append(rightChain, right) rightChain = append(rightChain, right)
par, err := cs.LoadTipSet(right.Parents()) par, err := lts(right.Parents())
if err != nil { if err != nil {
log.Infof("failed to fetch right.Parents: %s", err) log.Infof("failed to fetch right.Parents: %s", err)
return nil, nil, err return nil, nil, err
@ -519,6 +536,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
} }
return leftChain, rightChain, nil return leftChain, rightChain, nil
} }
// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head). // GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
@ -730,9 +748,15 @@ func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) {
return cids, nil return cids, nil
} }
func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { type BlockMessages struct {
Miner address.Address
BlsMessages []types.ChainMsg
SecpkMessages []types.ChainMsg
WinCount int64
}
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
applied := make(map[address.Address]uint64) applied := make(map[address.Address]uint64)
balances := make(map[address.Address]types.BigInt)
cst := cbor.NewCborStore(cs.bs) cst := cbor.NewCborStore(cs.bs)
st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
@ -748,43 +772,80 @@ func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, err
} }
applied[a] = act.Nonce applied[a] = act.Nonce
balances[a] = act.Balance
} }
return nil return nil
} }
var out []types.ChainMsg selectMsg := func(m *types.Message) (bool, error) {
if err := preloadAddr(m.From); err != nil {
return false, err
}
if applied[m.From] != m.Nonce {
return false, nil
}
applied[m.From]++
return true, nil
}
var out []BlockMessages
for _, b := range ts.Blocks() { for _, b := range ts.Blocks() {
bms, sms, err := cs.MessagesForBlock(b) bms, sms, err := cs.MessagesForBlock(b)
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to get messages for block: %w", err) return nil, xerrors.Errorf("failed to get messages for block: %w", err)
} }
cmsgs := make([]types.ChainMsg, 0, len(bms)+len(sms)) bm := BlockMessages{
for _, m := range bms { Miner: b.Miner,
cmsgs = append(cmsgs, m) BlsMessages: make([]types.ChainMsg, 0, len(bms)),
} SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
for _, sm := range sms { WinCount: b.ElectionProof.WinCount,
cmsgs = append(cmsgs, sm)
} }
for _, cm := range cmsgs { for _, bmsg := range bms {
m := cm.VMMessage() b, err := selectMsg(bmsg.VMMessage())
if err := preloadAddr(m.From); err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
} }
if applied[m.From] != m.Nonce { if b {
continue bm.BlsMessages = append(bm.BlsMessages, bmsg)
} }
applied[m.From]++ }
if balances[m.From].LessThan(m.RequiredFunds()) { for _, smsg := range sms {
continue b, err := selectMsg(smsg.VMMessage())
if err != nil {
return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err)
} }
balances[m.From] = types.BigSub(balances[m.From], m.RequiredFunds())
out = append(out, cm) if b {
bm.SecpkMessages = append(bm.SecpkMessages, smsg)
}
}
out = append(out, bm)
}
return out, nil
}
func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) {
bmsgs, err := cs.BlockMsgsForTipset(ts)
if err != nil {
return nil, err
}
var out []types.ChainMsg
for _, bm := range bmsgs {
for _, blsm := range bm.BlsMessages {
out = append(out, blsm)
}
for _, secm := range bm.SecpkMessages {
out = append(out, secm)
} }
} }
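Note what selectMsg above implies: selection is purely nonce-driven, so a message included in more than one block of the same tipset is picked only the first time it is seen, and the balance/RequiredFunds filtering from the old MessagesForTipset is gone. MessagesForTipset therefore yields each message at most once, mirroring the explicit seen-map de-duplication that ComputeBaseFee gains in this same commit.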
@ -892,7 +953,7 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er
for i, c := range cids { for i, c := range cids {
m, err := cs.GetMessage(c) m, err := cs.GetMessage(c)
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
} }
msgs = append(msgs, m) msgs = append(msgs, m)
@ -906,7 +967,7 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe
for i, c := range cids { for i, c := range cids {
m, err := cs.GetSignedMessage(c) m, err := cs.GetSignedMessage(c)
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
} }
msgs = append(msgs, m) msgs = append(msgs, m)
@ -974,8 +1035,42 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
return h.Sum(nil), nil return h.Sum(nil), nil
} }
func (cs *ChainStore) GetRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetRandomness") _, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...))
if err != nil {
return nil, err
}
if round > ts.Height() {
return nil, xerrors.Errorf("cannot draw randomness from the future")
}
searchHeight := round
if searchHeight < 0 {
searchHeight = 0
}
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
if err != nil {
return nil, err
}
be, err := cs.GetLatestBeaconEntry(randTs)
if err != nil {
return nil, err
}
// if at (or just past -- for null epochs) appropriate epoch
// or at genesis (works for negative epochs)
return DrawRandomness(be.Data, pers, round, entropy)
}
func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
defer span.End() defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round))) span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@ -1110,15 +1205,20 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer)
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err) return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
} }
for _, p := range b.Parents {
blocksToWalk = append(blocksToWalk, p)
}
cids, err := recurseLinks(cs.bs, b.Messages, []cid.Cid{b.Messages}) cids, err := recurseLinks(cs.bs, b.Messages, []cid.Cid{b.Messages})
if err != nil { if err != nil {
return xerrors.Errorf("recursing messages failed: %w", err) return xerrors.Errorf("recursing messages failed: %w", err)
} }
if b.Height > 0 {
for _, p := range b.Parents {
blocksToWalk = append(blocksToWalk, p)
}
} else {
// include the genesis block
cids = append(cids, b.Parents...)
}
out := cids out := cids
if b.Height == 0 { if b.Height == 0 {
@ -1216,8 +1316,12 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid, bheight abi.ChainEpoch) vm.Ran
} }
} }
func (cr *chainRand) GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetRandomness(ctx, cr.blks, pers, round, entropy) return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy)
}
func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy)
} }
func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {

View File

@ -73,7 +73,7 @@ func BenchmarkGetRandomness(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err := cs.GetRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) _, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -195,6 +195,8 @@ func fetchCids(
} }
type BlockValidator struct { type BlockValidator struct {
self peer.ID
peers *lru.TwoQueueCache peers *lru.TwoQueueCache
killThresh int killThresh int
@ -211,9 +213,10 @@ type BlockValidator struct {
keycache map[string]address.Address keycache map[string]address.Address
} }
func NewBlockValidator(chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator { func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator {
p, _ := lru.New2Q(4096) p, _ := lru.New2Q(4096)
return &BlockValidator{ return &BlockValidator{
self: self,
peers: p, peers: p,
killThresh: 10, killThresh: 10,
blacklist: blacklist, blacklist: blacklist,
@ -243,6 +246,10 @@ func (bv *BlockValidator) flagPeer(p peer.ID) {
} }
func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
if pid == bv.self {
return bv.validateLocalBlock(ctx, msg)
}
// track validation time // track validation time
begin := build.Clock.Now() begin := build.Clock.Now()
defer func() { defer func() {
@ -257,25 +264,10 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
bv.flagPeer(pid) bv.flagPeer(pid)
} }
// make sure the block can be decoded blk, what, err := bv.decodeAndCheckBlock(msg)
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil { if err != nil {
log.Error("got invalid block over pubsub: ", err) log.Error("got invalid block over pubsub: ", err)
recordFailure("invalid") recordFailure(what)
return pubsub.ValidationReject
}
// check the message limit constraints
if len(blk.BlsMessages)+len(blk.SecpkMessages) > build.BlockMessageLimit {
log.Warnf("received block with too many messages over pubsub")
recordFailure("too_many_messages")
return pubsub.ValidationReject
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
log.Warnf("received block without a signature over pubsub")
recordFailure("missing_signature")
return pubsub.ValidationReject return pubsub.ValidationReject
} }
@ -300,10 +292,9 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message") log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
recordFailure("unknown_miner") recordFailure("unknown_miner")
return pubsub.ValidationReject return pubsub.ValidationReject
} else {
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
return pubsub.ValidationIgnore
} }
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
return pubsub.ValidationIgnore
} }
err = sigs.CheckBlockSignature(ctx, blk.Header, key) err = sigs.CheckBlockSignature(ctx, blk.Header, key)
@ -332,6 +323,45 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
return pubsub.ValidationAccept return pubsub.ValidationAccept
} }
func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
stats.Record(ctx, metrics.BlockPublished.M(1))
blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
return pubsub.ValidationIgnore
}
if count := bv.recvBlocks.add(blk.Header.Cid()); count > 0 {
log.Warnf("local block has been seen %d times; ignoring", count)
return pubsub.ValidationIgnore
}
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept
}
func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil {
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
}
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
return nil, "missing_signature", fmt.Errorf("block without a signature")
}
return blk, "", nil
}
func (bv *BlockValidator) isChainNearSynced() bool { func (bv *BlockValidator) isChainNearSynced() bool {
ts := bv.chain.GetHeaviestTipSet() ts := bv.chain.GetHeaviestTipSet()
timestamp := ts.MinTimestamp() timestamp := ts.MinTimestamp()
@ -485,14 +515,19 @@ func (brc *blockReceiptCache) add(bcid cid.Cid) int {
} }
type MessageValidator struct { type MessageValidator struct {
self peer.ID
mpool *messagepool.MessagePool mpool *messagepool.MessagePool
} }
func NewMessageValidator(mp *messagepool.MessagePool) *MessageValidator { func NewMessageValidator(self peer.ID, mp *messagepool.MessagePool) *MessageValidator {
return &MessageValidator{mp} return &MessageValidator{self: self, mpool: mp}
} }
func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
if pid == mv.self {
return mv.validateLocalMessage(ctx, msg)
}
stats.Record(ctx, metrics.MessageReceived.M(1)) stats.Record(ctx, metrics.MessageReceived.M(1))
m, err := types.DecodeSignedMessage(msg.Message.GetData()) m, err := types.DecodeSignedMessage(msg.Message.GetData())
if err != nil { if err != nil {
@ -510,7 +545,11 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
) )
stats.Record(ctx, metrics.MessageValidationFailure.M(1)) stats.Record(ctx, metrics.MessageValidationFailure.M(1))
switch { switch {
case xerrors.Is(err, messagepool.ErrBroadcastAnyway) || xerrors.Is(err, messagepool.ErrRBFTooLowPremium): case xerrors.Is(err, messagepool.ErrBroadcastAnyway):
fallthrough
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
fallthrough
case xerrors.Is(err, messagepool.ErrNonceTooLow):
return pubsub.ValidationIgnore return pubsub.ValidationIgnore
default: default:
return pubsub.ValidationReject return pubsub.ValidationReject
@ -520,6 +559,45 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationAccept return pubsub.ValidationAccept
} }
func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
// do some lightweight validation
stats.Record(ctx, metrics.MessagePublished.M(1))
m, err := types.DecodeSignedMessage(msg.Message.GetData())
if err != nil {
log.Warnf("failed to decode local message: %s", err)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
if m.Size() > 32*1024 {
log.Warnf("local message is too large! (%dB)", m.Size())
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
if m.Message.To == address.Undef {
log.Warn("local message has invalid destination address")
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
if !m.Message.Value.LessThan(types.TotalFilecoinInt) {
log.Warnf("local messages has too high value: %s", m.Message.Value)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
if err := mv.mpool.VerifyMsgSig(m); err != nil {
log.Warnf("signature verification failed for local message: %s", err)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
return pubsub.ValidationIgnore
}
stats.Record(ctx, metrics.MessageValidationSuccess.M(1))
return pubsub.ValidationAccept
}
func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool, msub *pubsub.Subscription) { func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool, msub *pubsub.Subscription) {
for { for {
_, err := msub.Next(ctx) _, err := msub.Next(ctx)

View File

@ -7,6 +7,7 @@ import (
"fmt" "fmt"
"os" "os"
"sort" "sort"
"strconv"
"strings" "strings"
"time" "time"
@ -24,7 +25,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/power" "github.com/filecoin-project/specs-actors/actors/builtin/power"
@ -52,6 +53,19 @@ import (
//the theoretical max height based on systime are quickly rejected //the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5 const MaxHeightDrift = 5
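// defaultMessageFetchWindowSize bounds how many tipsets' messages are fetched
// per request while syncing (see iterFullTipsets below); it can be overridden
// at startup with the LOTUS_BSYNC_MSG_WINDOW environment variable.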
var defaultMessageFetchWindowSize = 200
func init() {
if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
val, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse LOTUS_BSYNC_MSG_WINDOW: %s", err)
return
}
defaultMessageFetchWindowSize = val
}
}
var log = logging.Logger("chain") var log = logging.Logger("chain")
var LocalIncoming = "incoming" var LocalIncoming = "incoming"
@ -1399,7 +1413,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
windowSize := 200 windowSize := defaultMessageFetchWindowSize
for i := len(headers) - 1; i >= 0; { for i := len(headers) - 1; i >= 0; {
fts, err := syncer.store.TryFillTipSet(headers[i]) fts, err := syncer.store.TryFillTipSet(headers[i])
if err != nil { if err != nil {

View File

@ -343,12 +343,12 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket) sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket)
} }
return return
} else {
// TODO: this is the case where we try to sync a chain, and
// fail, and we have more blocks on top of that chain that
// have come in since. The question is, should we try to
// sync these? or just drop them?
} }
// TODO: this is the case where we try to sync a chain, and
// fail, and we have more blocks on top of that chain that
// have come in since. The question is, should we try to
// sync these? or just drop them?
log.Error("failed to sync chain but have new unconnected blocks from chain")
} }
if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() { if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {

View File

@ -7,6 +7,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore" ds "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
@ -35,7 +37,10 @@ import (
func init() { func init() {
build.InsecurePoStValidation = true build.InsecurePoStValidation = true
os.Setenv("TRUST_PARAMS", "1") err := os.Setenv("TRUST_PARAMS", "1")
if err != nil {
panic(err)
}
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg2KiBV1: {}, abi.RegisteredSealProof_StackedDrg2KiBV1: {},
} }
@ -172,7 +177,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo
} }
} }
func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool) *store.FullTipSet { func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet {
if miners == nil { if miners == nil {
for i := range tu.g.Miners { for i := range tu.g.Miners {
miners = append(miners, i) miners = append(miners, i)
@ -186,37 +191,31 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int,
fmt.Println("Miner mining block: ", maddrs) fmt.Println("Miner mining block: ", maddrs)
mts, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) var nts *store.FullTipSet
require.NoError(tu.t, err) var err error
if msgs != nil {
if fail { nts, err = tu.g.NextTipSetFromMinersWithMessages(blk.TipSet(), maddrs, msgs)
tu.pushTsExpectErr(to, mts.TipSet, true) require.NoError(tu.t, err)
} else { } else {
tu.pushFtsAndWait(to, mts.TipSet, wait) mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs)
require.NoError(tu.t, err)
nts = mt.TipSet
} }
return mts.TipSet if fail {
tu.pushTsExpectErr(to, nts, true)
} else {
tu.pushFtsAndWait(to, nts, wait)
}
return nts
} }
func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false) mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil)
tu.g.CurTipset = mts tu.g.CurTipset = mts
} }
func fblkToBlkMsg(fb *types.FullBlock) *types.BlockMsg {
out := &types.BlockMsg{
Header: fb.Header,
}
for _, msg := range fb.BlsMessages {
out.BlsMessages = append(out.BlsMessages, msg.Cid())
}
for _, msg := range fb.SecpkMessages {
out.SecpkMessages = append(out.SecpkMessages, msg.Cid())
}
return out
}
func (tu *syncTestUtil) addSourceNode(gen int) { func (tu *syncTestUtil) addSourceNode(gen int) {
if tu.genesis != nil { if tu.genesis != nil {
tu.t.Fatal("source node already exists") tu.t.Fatal("source node already exists")
@ -416,7 +415,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("BASE: ", base.Cids()) fmt.Println("BASE: ", base.Cids())
tu.printHeads() tu.printHeads()
a1 := tu.mineOnBlock(base, 0, nil, false, true) a1 := tu.mineOnBlock(base, 0, nil, false, true, nil)
tu.g.Timestamper = nil tu.g.Timestamper = nil
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@ -425,7 +424,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("After mine bad block!") fmt.Println("After mine bad block!")
tu.printHeads() tu.printHeads()
a2 := tu.mineOnBlock(base, 0, nil, true, false) a2 := tu.mineOnBlock(base, 0, nil, true, false, nil)
tu.waitUntilSync(0, client) tu.waitUntilSync(0, client)
@ -445,7 +444,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) { func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
return []abi.PoStProof{ return []abi.PoStProof{
abi.PoStProof{ {
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
ProofBytes: []byte("evil"), ProofBytes: []byte("evil"),
}, },
@ -469,7 +468,7 @@ func TestSyncBadWinningPoSt(t *testing.T) {
tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{}) tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{})
// now ensure that new blocks are not accepted // now ensure that new blocks are not accepted
tu.mineOnBlock(base, client, nil, false, true) tu.mineOnBlock(base, client, nil, false, true, nil)
} }
func (tu *syncTestUtil) loadChainToNode(to int) { func (tu *syncTestUtil) loadChainToNode(to int) {
@ -514,16 +513,16 @@ func TestSyncFork(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b' // The two nodes fork at this point into 'a' and 'b'
a1 := tu.mineOnBlock(base, p1, []int{0}, true, false) a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil)
a := tu.mineOnBlock(a1, p1, []int{0}, true, false) a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil)
a = tu.mineOnBlock(a, p1, []int{0}, true, false) a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil)
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest // chain B will now be heaviest
b := tu.mineOnBlock(base, p2, []int{1}, true, false) b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil)
b = tu.mineOnBlock(b, p2, []int{1}, true, false) b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
b = tu.mineOnBlock(b, p2, []int{1}, true, false) b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
b = tu.mineOnBlock(b, p2, []int{1}, true, false) b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil)
fmt.Println("A: ", a.Cids(), a.TipSet().Height()) fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height()) fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@ -538,6 +537,99 @@ func TestSyncFork(t *testing.T) {
phead() phead()
} }
// This test crafts a tipset with 2 blocks, A and B.
// A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
// We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
func TestDuplicateNonce(t *testing.T) {
H := 10
tu := prepSyncTest(t, H)
base := tu.g.CurTipset
// Produce a message from the banker to the rcvr
makeMsg := func(rcvr address.Address) *types.SignedMessage {
ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
require.NoError(t, err)
msg := types.Message{
To: rcvr,
From: tu.g.Banker(),
Nonce: ba.Nonce,
Value: types.NewInt(1),
Method: 0,
GasLimit: 100_000_000,
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
}
sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes())
require.NoError(t, err)
return &types.SignedMessage{
Message: msg,
Signature: *sig,
}
}
msgs := make([][]*types.SignedMessage, 2)
// Each miner includes a message from the banker with the same nonce, but to different addresses
for k := range msgs {
msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
}
ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs)
tu.waitUntilSyncTarget(0, ts1.TipSet())
// mine another tipset
ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2))
tu.waitUntilSyncTarget(0, ts2.TipSet())
var includedMsg cid.Cid
var skippedMsg cid.Cid
r0, err0 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[0][0].Cid(), ts2.TipSet().Key())
r1, err1 := tu.nds[0].StateGetReceipt(context.TODO(), msgs[1][0].Cid(), ts2.TipSet().Key())
if err0 == nil {
require.Error(t, err1, "at least one of the StateGetReceipt calls should fail")
require.True(t, r0.ExitCode.IsSuccess())
includedMsg = msgs[0][0].Message.Cid()
skippedMsg = msgs[1][0].Message.Cid()
} else {
require.NoError(t, err1, "both the StateGetReceipt calls should not fail")
require.True(t, r1.ExitCode.IsSuccess())
includedMsg = msgs[1][0].Message.Cid()
skippedMsg = msgs[0][0].Message.Cid()
}
_, rslts, err := tu.g.StateManager().ExecutionTrace(context.TODO(), ts1.TipSet())
require.NoError(t, err)
found := false
for _, v := range rslts {
if v.Msg.Cid() == skippedMsg {
t.Fatal("skipped message should not be in exec trace")
}
if v.Msg.Cid() == includedMsg {
found = true
}
}
if !found {
t.Fatal("included message should be in exec trace")
}
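// MessagesForTipset should deduplicate across the two blocks, returning only
// the message that was actually applied (includedMsg).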
mft, err := tu.g.ChainStore().MessagesForTipset(ts1.TipSet())
require.NoError(t, err)
require.True(t, len(mft) == 1, "only expecting one message for this tipset")
require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message")
}
func BenchmarkSyncBasic(b *testing.B) { func BenchmarkSyncBasic(b *testing.B) {
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
runSyncBenchLength(b, 100) runSyncBenchLength(b, 100)

View File

@ -1,13 +1,14 @@
package types package types
import ( import (
"errors"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
) )
var ErrActorNotFound = init_.ErrAddressNotFound var ErrActorNotFound = errors.New("actor not found")
type Actor struct { type Actor struct {
// Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`. // Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`.

View File

@ -22,6 +22,16 @@ type Ticket struct {
VRFProof []byte VRFProof []byte
} }
func (t *Ticket) Quality() float64 {
ticketHash := blake2b.Sum256(t.VRFProof)
ticketNum := BigFromBytes(ticketHash[:]).Int
ticketDenu := big.NewInt(1)
ticketDenu.Lsh(ticketDenu, 256)
tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64()
tq := 1 - tv
return tq
}
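In formula form, with the digest interpreted as a big-endian 256-bit integer (as BigFromBytes does above):

Q(t) = 1 - \frac{\mathrm{blake2b}_{256}(\mathrm{VRFProof}(t))}{2^{256}}

so tickets hashing to smaller values have quality closer to 1.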
type BeaconEntry struct { type BeaconEntry struct {
Round uint64 Round uint64
Data []byte Data []byte

View File

@ -81,7 +81,7 @@ func TestInteropBH(t *testing.T) {
} }
posts := []abi.PoStProof{ posts := []abi.PoStProof{
{abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, []byte{0x07}}, {PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, ProofBytes: []byte{0x07}},
} }
bh := &BlockHeader{ bh := &BlockHeader{

View File

@ -6,9 +6,9 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/filecoin-project/specs-actors/actors/abi" abi "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto" crypto "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode" exitcode "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
cid "github.com/ipfs/go-cid" cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors" xerrors "golang.org/x/xerrors"
@ -180,16 +180,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.Ticket = new(Ticket) t.Ticket = new(Ticket)
if err := t.Ticket.UnmarshalCBOR(br); err != nil { if err := t.Ticket.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err) return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err)
@ -201,16 +199,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.ElectionProof = new(ElectionProof) t.ElectionProof = new(ElectionProof)
if err := t.ElectionProof.UnmarshalCBOR(br); err != nil { if err := t.ElectionProof.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err) return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err)
@ -378,16 +374,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.BLSAggregate = new(crypto.Signature) t.BLSAggregate = new(crypto.Signature)
if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil { if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err) return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err)
@ -413,16 +407,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.BlockSig = new(crypto.Signature) t.BlockSig = new(crypto.Signature)
if err := t.BlockSig.UnmarshalCBOR(br); err != nil { if err := t.BlockSig.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err) return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err)
@ -645,15 +637,10 @@ func (t *Message) MarshalCBOR(w io.Writer) error {
scratch := make([]byte, 9) scratch := make([]byte, 9)
// t.Version (int64) (int64) // t.Version (uint64) (uint64)
if t.Version >= 0 {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
return err return err
}
} else {
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Version-1)); err != nil {
return err
}
} }
// t.To (address.Address) (struct) // t.To (address.Address) (struct)
@ -737,30 +724,19 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error {
return fmt.Errorf("cbor input had wrong number of fields") return fmt.Errorf("cbor input had wrong number of fields")
} }
// t.Version (int64) (int64) // t.Version (uint64) (uint64)
{ {
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
var extraI int64 maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil { if err != nil {
return err return err
} }
switch maj { if maj != cbg.MajUnsignedInt {
case cbg.MajUnsignedInt: return fmt.Errorf("wrong type for uint64 field")
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
} }
t.Version = uint64(extra)
t.Version = int64(extraI)
} }
// t.To (address.Address) (struct) // t.To (address.Address) (struct)
@ -1337,16 +1313,14 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error {
{ {
pb, err := br.PeekByte() b, err := br.ReadByte()
if err != nil { if err != nil {
return err return err
} }
if pb == cbg.CborNull[0] { if b != cbg.CborNull[0] {
var nbuf [1]byte if err := br.UnreadByte(); err != nil {
if _, err := br.Read(nbuf[:]); err != nil {
return err return err
} }
} else {
t.Header = new(BlockHeader) t.Header = new(BlockHeader)
if err := t.Header.UnmarshalCBOR(br); err != nil { if err := t.Header.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Header pointer: %w", err) return xerrors.Errorf("unmarshaling t.Header pointer: %w", err)

View File

@ -1,6 +1,7 @@
package types package types
import ( import (
"encoding"
"fmt" "fmt"
"math/big" "math/big"
"strings" "strings"
@ -27,6 +28,20 @@ func (f FIL) Format(s fmt.State, ch rune) {
} }
} }
func (f FIL) MarshalText() (text []byte, err error) {
return []byte(f.String()), nil
}
func (f FIL) UnmarshalText(text []byte) error {
p, err := ParseFIL(string(text))
if err != nil {
return err
}
f.Int.Set(p.Int)
return nil
}
func ParseFIL(s string) (FIL, error) { func ParseFIL(s string) (FIL, error) {
suffix := strings.TrimLeft(s, ".1234567890") suffix := strings.TrimLeft(s, ".1234567890")
s = s[:len(s)-len(suffix)] s = s[:len(s)-len(suffix)]
@ -61,3 +76,6 @@ func ParseFIL(s string) (FIL, error) {
return FIL{r.Num()}, nil return FIL{r.Num()}, nil
} }
var _ encoding.TextMarshaler = (*FIL)(nil)
var _ encoding.TextUnmarshaler = (*FIL)(nil)

View File

@ -25,7 +25,7 @@ type ChainMsg interface {
} }
type Message struct { type Message struct {
Version int64 Version uint64
To address.Address To address.Address
From address.Address From address.Address
@ -118,6 +118,17 @@ func (m *Message) Equals(o *Message) bool {
return m.Cid() == o.Cid() return m.Cid() == o.Cid()
} }
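// EqualCall reports whether two messages describe the same call, ignoring the
// gas fields (GasLimit, GasFeeCap, GasPremium).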
func (m *Message) EqualCall(o *Message) bool {
m1 := *m
m2 := *o
m1.GasLimit, m2.GasLimit = 0, 0
m1.GasFeeCap, m2.GasFeeCap = big.Zero(), big.Zero()
m1.GasPremium, m2.GasPremium = big.Zero(), big.Zero()
return (&m1).Equals(&m2)
}
func (m *Message) ValidForBlockInclusion(minGas int64) error { func (m *Message) ValidForBlockInclusion(minGas int64) error {
if m.Version != 0 { if m.Version != 0 {
return xerrors.New("'Version' unsupported") return xerrors.New("'Version' unsupported")
@ -131,6 +142,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error {
return xerrors.New("'From' address cannot be empty") return xerrors.New("'From' address cannot be empty")
} }
if m.Value.Int == nil {
return xerrors.New("'Value' cannot be nil")
}
if m.Value.LessThan(big.Zero()) { if m.Value.LessThan(big.Zero()) {
return xerrors.New("'Value' field cannot be negative") return xerrors.New("'Value' field cannot be negative")
} }
@ -139,14 +154,26 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error {
return xerrors.New("'Value' field cannot be greater than total filecoin supply") return xerrors.New("'Value' field cannot be greater than total filecoin supply")
} }
if m.GasFeeCap.Int == nil {
return xerrors.New("'GasFeeCap' cannot be nil")
}
if m.GasFeeCap.LessThan(big.Zero()) { if m.GasFeeCap.LessThan(big.Zero()) {
return xerrors.New("'GasFeeCap' field cannot be negative") return xerrors.New("'GasFeeCap' field cannot be negative")
} }
if m.GasPremium.Int == nil {
return xerrors.New("'GasPremium' cannot be nil")
}
if m.GasPremium.LessThan(big.Zero()) { if m.GasPremium.LessThan(big.Zero()) {
return xerrors.New("'GasPremium' field cannot be negative") return xerrors.New("'GasPremium' field cannot be negative")
} }
if m.GasPremium.GreaterThan(m.GasFeeCap) {
return xerrors.New("'GasFeeCap' less than 'GasPremium'")
}
if m.GasLimit > build.BlockGasLimit { if m.GasLimit > build.BlockGasLimit {
return xerrors.New("'GasLimit' field cannot be greater than a block's gas limit") return xerrors.New("'GasLimit' field cannot be greater than a block's gas limit")
} }
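A minimal illustration of the new premium/fee-cap check (hypothetical values, assuming the same package and the specs-actors big/builtin imports used in the tests below):

msg := Message{
	To:         builtin.SystemActorAddr,
	From:       builtin.SystemActorAddr,
	Value:      big.Zero(),
	GasLimit:   1_000_000,
	GasFeeCap:  big.NewInt(100),
	GasPremium: big.NewInt(200), // premium above the fee cap
}
err := msg.ValidForBlockInclusion(0)
// err is non-nil: "'GasFeeCap' less than 'GasPremium'"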

View File

@ -0,0 +1,72 @@
package types
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
)
func TestEqualCall(t *testing.T) {
m1 := &Message{
To: builtin.StoragePowerActorAddr,
From: builtin.SystemActorAddr,
Nonce: 34,
Value: big.Zero(),
GasLimit: 123,
GasFeeCap: big.NewInt(234),
GasPremium: big.NewInt(234),
Method: 6,
Params: []byte("hai"),
}
m2 := &Message{
To: builtin.StoragePowerActorAddr,
From: builtin.SystemActorAddr,
Nonce: 34,
Value: big.Zero(),
GasLimit: 1236, // changed
GasFeeCap: big.NewInt(234),
GasPremium: big.NewInt(234),
Method: 6,
Params: []byte("hai"),
}
m3 := &Message{
To: builtin.StoragePowerActorAddr,
From: builtin.SystemActorAddr,
Nonce: 34,
Value: big.Zero(),
GasLimit: 123,
GasFeeCap: big.NewInt(4524), // changed
GasPremium: big.NewInt(234),
Method: 6,
Params: []byte("hai"),
}
m4 := &Message{
To: builtin.StoragePowerActorAddr,
From: builtin.SystemActorAddr,
Nonce: 34,
Value: big.Zero(),
GasLimit: 123,
GasFeeCap: big.NewInt(4524),
GasPremium: big.NewInt(234),
Method: 5, // changed
Params: []byte("hai"),
}
require.True(t, m1.EqualCall(m2))
require.True(t, m1.EqualCall(m3))
require.False(t, m1.EqualCall(m4))
}

View File

@ -62,8 +62,8 @@ func (sm *SignedMessage) Serialize() ([]byte, error) {
return buf.Bytes(), nil return buf.Bytes(), nil
} }
func (m *SignedMessage) ChainLength() int { func (sm *SignedMessage) ChainLength() int {
ser, err := m.Serialize() ser, err := sm.Serialize()
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -238,3 +238,7 @@ func (ts *TipSet) IsChildOf(parent *TipSet) bool {
// height for their processing logic at the moment to obviate it. // height for their processing logic at the moment to obviate it.
ts.height > parent.height ts.height > parent.height
} }
func (ts *TipSet) String() string {
return fmt.Sprintf("%v", ts.cids)
}

View File

@ -72,9 +72,9 @@ func (a *Applier) ApplyTipSetMessages(epoch abi.ChainEpoch, blocks []vtypes.Bloc
cs := store.NewChainStore(a.stateWrapper.bs, a.stateWrapper.ds, a.syscalls) cs := store.NewChainStore(a.stateWrapper.bs, a.stateWrapper.ds, a.syscalls)
sm := stmgr.NewStateManager(cs) sm := stmgr.NewStateManager(cs)
var bms []stmgr.BlockMessages var bms []store.BlockMessages
for _, b := range blocks { for _, b := range blocks {
bm := stmgr.BlockMessages{ bm := store.BlockMessages{
Miner: b.Miner, Miner: b.Miner,
WinCount: 1, WinCount: 1,
} }
@ -121,17 +121,26 @@ func (a *Applier) ApplyTipSetMessages(epoch abi.ChainEpoch, blocks []vtypes.Bloc
} }
type randWrapper struct { type randWrapper struct {
rnd vstate.RandomnessSource rand vstate.RandomnessSource
} }
func (w *randWrapper) GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { // TODO: these should really be two different randomness sources
return w.rnd.Randomness(ctx, pers, round, entropy) func (w *randWrapper) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return w.rand.Randomness(ctx, pers, round, entropy)
}
func (w *randWrapper) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return w.rand.Randomness(ctx, pers, round, entropy)
} }
type vmRand struct { type vmRand struct {
} }
func (*vmRand) GetRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) { func (*vmRand) GetChainRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) {
panic("implement me")
}
func (*vmRand) GetBeaconRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) {
panic("implement me") panic("implement me")
} }
@ -140,13 +149,13 @@ func (a *Applier) applyMessage(epoch abi.ChainEpoch, lm types.ChainMsg) (vtypes.
base := a.stateWrapper.Root() base := a.stateWrapper.Root()
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: base, StateBase: base,
Epoch: epoch, Epoch: epoch,
Rand: &vmRand{}, Rand: &vmRand{},
Bstore: a.stateWrapper.bs, Bstore: a.stateWrapper.bs,
Syscalls: a.syscalls, Syscalls: a.syscalls,
VestedCalc: nil, CircSupplyCalc: nil,
BaseFee: abi.NewTokenAmount(100), BaseFee: abi.NewTokenAmount(100),
} }
lotusVM, err := vm.NewVM(vmopt) lotusVM, err := vm.NewVM(vmopt)

View File

@ -2,6 +2,7 @@ package validation
import ( import (
"context" "context"
"github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/specs-actors/actors/runtime" "github.com/filecoin-project/specs-actors/actors/runtime"
cbor "github.com/ipfs/go-ipld-cbor" cbor "github.com/ipfs/go-ipld-cbor"

View File

@ -2,9 +2,10 @@ package validation
import ( import (
"fmt" "fmt"
"github.com/minio/blake2b-simd"
"math/rand" "math/rand"
"github.com/minio/blake2b-simd"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-crypto" "github.com/filecoin-project/go-crypto"
acrypto "github.com/filecoin-project/specs-actors/actors/crypto" acrypto "github.com/filecoin-project/specs-actors/actors/crypto"
@ -69,7 +70,7 @@ func (k *KeyManager) Sign(addr address.Address, data []byte) (acrypto.Signature,
} }
func (k *KeyManager) newSecp256k1Key() *wallet.Key { func (k *KeyManager) newSecp256k1Key() *wallet.Key {
randSrc := rand.New(rand.NewSource(k.secpSeed)) randSrc := rand.New(rand.NewSource(k.secpSeed)) // nolint
prv, err := crypto.GenerateKeyFromSeed(randSrc) prv, err := crypto.GenerateKeyFromSeed(randSrc)
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -18,7 +18,7 @@ func LoadVector(t *testing.T, f string, out interface{}) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
defer fi.Close() defer fi.Close() //nolint:errcheck
if err := json.NewDecoder(fi).Decode(out); err != nil { if err := json.NewDecoder(fi).Decode(out); err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -8,14 +8,10 @@ import (
gruntime "runtime" gruntime "runtime"
"time" "time"
samarket "github.com/filecoin-project/specs-actors/actors/builtin/market"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big" "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin" "github.com/filecoin-project/specs-actors/actors/builtin"
sainit "github.com/filecoin-project/specs-actors/actors/builtin/init"
sapower "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime" "github.com/filecoin-project/specs-actors/actors/runtime"
vmr "github.com/filecoin-project/specs-actors/actors/runtime" vmr "github.com/filecoin-project/specs-actors/actors/runtime"
@ -62,68 +58,18 @@ type Runtime struct {
} }
func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount {
cs, err := rt.vm.GetCircSupply(rt.ctx)
filVested, err := rt.vm.GetVestedFunds(rt.ctx)
if err != nil { if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get vested funds for computing total supply: %s", err) rt.Abortf(exitcode.ErrIllegalState, "failed to get total circ supply: %s", err)
} }
rew, err := rt.state.GetActor(builtin.RewardActorAddr) return cs
if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err)
}
filMined := types.BigSub(types.FromFil(build.FilAllocStorageMining), rew.Balance)
if filMined.LessThan(big.Zero()) {
filMined = big.Zero()
}
burnt, err := rt.state.GetActor(builtin.BurntFundsActorAddr)
if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err)
}
filBurned := burnt.Balance
market, err := rt.state.GetActor(builtin.StorageMarketActorAddr)
if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err)
}
var mst samarket.State
if err := rt.cst.Get(rt.ctx, market.Head, &mst); err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get market state: %s", err)
}
filMarketLocked := types.BigAdd(mst.TotalClientLockedCollateral, mst.TotalProviderLockedCollateral)
filMarketLocked = types.BigAdd(filMarketLocked, mst.TotalClientStorageFee)
power, err := rt.state.GetActor(builtin.StoragePowerActorAddr)
if err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err)
}
var pst sapower.State
if err := rt.cst.Get(rt.ctx, power.Head, &pst); err != nil {
rt.Abortf(exitcode.ErrIllegalState, "failed to get storage power state: %s", err)
}
filLocked := types.BigAdd(filMarketLocked, pst.TotalPledgeCollateral)
ret := types.BigAdd(filVested, filMined)
ret = types.BigSub(ret, filBurned)
ret = types.BigSub(ret, filLocked)
if ret.LessThan(big.Zero()) {
ret = big.Zero()
}
return ret
} }
func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) { func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) {
r, err := rt.state.LookupID(addr) r, err := rt.state.LookupID(addr)
if err != nil { if err != nil {
if xerrors.Is(err, sainit.ErrAddressNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
return address.Undef, false return address.Undef, false
} }
panic(aerrors.Fatalf("failed to resolve address %s: %s", addr, err)) panic(aerrors.Fatalf("failed to resolve address %s: %s", addr, err))
@ -232,8 +178,16 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool)
return act.Code, true return act.Code, true
} }
func (rt *Runtime) GetRandomness(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
res, err := rt.vm.rand.GetRandomness(rt.ctx, personalization, randEpoch, entropy) res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy)
if err != nil {
panic(aerrors.Fatalf("could not get randomness: %s", err))
}
return res
}
func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy)
if err != nil { if err != nil {
panic(aerrors.Fatalf("could not get randomness: %s", err)) panic(aerrors.Fatalf("could not get randomness: %s", err))
} }
@ -456,8 +410,10 @@ type shimStateHandle struct {
func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) { func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
c := ssh.rt.Put(obj) c := ssh.rt.Put(obj)
// TODO: handle error below err := ssh.rt.stateCommit(EmptyObjectCid, c)
ssh.rt.stateCommit(EmptyObjectCid, c) if err != nil {
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
}
} }
func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) { func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
@ -486,8 +442,10 @@ func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func()) {
c := ssh.rt.Put(obj) c := ssh.rt.Put(obj)
// TODO: handle error below err = ssh.rt.stateCommit(baseState, c)
ssh.rt.stateCommit(baseState, c) if err != nil {
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
}
} }
func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) { func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) {

View File

@ -23,7 +23,7 @@ import (
"github.com/filecoin-project/specs-actors/actors/runtime" "github.com/filecoin-project/specs-actors/actors/runtime"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
) )
func init() { func init() {

View File

@ -24,7 +24,6 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/account" "github.com/filecoin-project/specs-actors/actors/builtin/account"
init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
"github.com/filecoin-project/specs-actors/actors/crypto" "github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode" "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
@ -141,30 +140,30 @@ func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message, origin
return vm.VM.makeRuntime(ctx, msg, origin, originNonce, usedGas, nac) return vm.VM.makeRuntime(ctx, msg, origin, originNonce, usedGas, nac)
} }
type VestedCalculator func(context.Context, abi.ChainEpoch) (abi.TokenAmount, error) type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error)
type VM struct { type VM struct {
cstate *state.StateTree cstate *state.StateTree
base cid.Cid base cid.Cid
cst *cbor.BasicIpldStore cst *cbor.BasicIpldStore
buf *bufbstore.BufferedBS buf *bufbstore.BufferedBS
blockHeight abi.ChainEpoch blockHeight abi.ChainEpoch
inv *Invoker inv *Invoker
rand Rand rand Rand
vc VestedCalculator circSupplyCalc CircSupplyCalculator
baseFee abi.TokenAmount baseFee abi.TokenAmount
Syscalls SyscallBuilder Syscalls SyscallBuilder
} }
type VMOpts struct { type VMOpts struct {
StateBase cid.Cid StateBase cid.Cid
Epoch abi.ChainEpoch Epoch abi.ChainEpoch
Rand Rand Rand Rand
Bstore bstore.Blockstore Bstore bstore.Blockstore
Syscalls SyscallBuilder Syscalls SyscallBuilder
VestedCalc VestedCalculator CircSupplyCalc CircSupplyCalculator
BaseFee abi.TokenAmount BaseFee abi.TokenAmount
} }
func NewVM(opts *VMOpts) (*VM, error) { func NewVM(opts *VMOpts) (*VM, error) {
@ -176,21 +175,22 @@ func NewVM(opts *VMOpts) (*VM, error) {
} }
return &VM{ return &VM{
cstate: state, cstate: state,
base: opts.StateBase, base: opts.StateBase,
cst: cst, cst: cst,
buf: buf, buf: buf,
blockHeight: opts.Epoch, blockHeight: opts.Epoch,
inv: NewInvoker(), inv: NewInvoker(),
rand: opts.Rand, // TODO: Probably should be a syscall rand: opts.Rand, // TODO: Probably should be a syscall
vc: opts.VestedCalc, circSupplyCalc: opts.CircSupplyCalc,
Syscalls: opts.Syscalls, Syscalls: opts.Syscalls,
baseFee: opts.BaseFee, baseFee: opts.BaseFee,
}, nil }, nil
} }
type Rand interface { type Rand interface {
GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
} }
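Callers that construct a VM directly (tests, tooling) now need to supply both randomness methods; a minimal stub satisfying the interface might look like the sketch below (illustrative only, not part of this change):

type fixedRand struct{}

func (fixedRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return []byte("fixed chain randomness"), nil // deterministic placeholder
}

func (fixedRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	return []byte("fixed beacon randomness"), nil // deterministic placeholder
}

var _ Rand = fixedRand{}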
type ApplyRet struct { type ApplyRet struct {
@ -240,7 +240,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
_ = rt.chargeGasSafe(newGasCharge("OnGetActor", 0, 0)) _ = rt.chargeGasSafe(newGasCharge("OnGetActor", 0, 0))
toActor, err := st.GetActor(msg.To) toActor, err := st.GetActor(msg.To)
if err != nil { if err != nil {
if xerrors.Is(err, init_.ErrAddressNotFound) { if xerrors.Is(err, types.ErrActorNotFound) {
a, err := TryCreateAccountActor(rt, msg.To) a, err := TryCreateAccountActor(rt, msg.To)
if err != nil { if err != nil {
return nil, aerrors.Wrapf(err, "could not create account") return nil, aerrors.Wrapf(err, "could not create account")
@ -254,6 +254,9 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil { if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil {
return nil, aerrors.Wrap(aerr, "not enough gas for method invocation") return nil, aerrors.Wrap(aerr, "not enough gas for method invocation")
} }
// not charging any gas, just logging
//nolint:errcheck
defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0))
if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {
@ -714,8 +717,8 @@ func (vm *VM) SetInvoker(i *Invoker) {
vm.inv = i vm.inv = i
} }
func (vm *VM) GetVestedFunds(ctx context.Context) (abi.TokenAmount, error) { func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
return vm.vc(ctx, vm.blockHeight) return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
} }
func (vm *VM) incrementNonce(addr address.Address) error { func (vm *VM) incrementNonce(addr address.Address) error {

View File

@ -2,6 +2,7 @@ package cli
import ( import (
"fmt" "fmt"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"

View File

@ -319,7 +319,7 @@ var chainSetHeadCmd = &cli.Command{
ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK) ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK)
} }
if ts == nil { if ts == nil {
ts, err = parseTipSet(api, ctx, cctx.Args().Slice()) ts, err = parseTipSet(ctx, api, cctx.Args().Slice())
} }
if err != nil { if err != nil {
return err return err
@ -337,7 +337,7 @@ var chainSetHeadCmd = &cli.Command{
}, },
} }
func parseTipSet(api api.FullNode, ctx context.Context, vals []string) (*types.TipSet, error) { func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
var headers []*types.BlockHeader var headers []*types.BlockHeader
for _, c := range vals { for _, c := range vals {
blkc, err := cid.Decode(c) blkc, err := cid.Decode(c)
@ -1007,7 +1007,7 @@ var slashConsensusFault = &cli.Command{
Params: enc, Params: enc,
} }
smsg, err := api.MpoolPushMessage(ctx, msg) smsg, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil { if err != nil {
return err return err
} }

View File

@ -3,6 +3,7 @@ package cli
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
@ -10,8 +11,11 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
tm "github.com/buger/goterm"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/fatih/color" "github.com/fatih/color"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc" "github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
@ -75,6 +79,7 @@ var clientCmd = &cli.Command{
WithCategory("util", clientCommPCmd), WithCategory("util", clientCommPCmd),
WithCategory("util", clientCarGenCmd), WithCategory("util", clientCarGenCmd),
WithCategory("util", clientInfoCmd), WithCategory("util", clientInfoCmd),
WithCategory("util", clientListTransfers),
}, },
} }
@ -173,7 +178,7 @@ var clientDropCmd = &cli.Command{
var clientCommPCmd = &cli.Command{ var clientCommPCmd = &cli.Command{
Name: "commP", Name: "commP",
Usage: "Calculate the piece-cid (commP) of a CAR file", Usage: "Calculate the piece-cid (commP) of a CAR file",
ArgsUsage: "[inputFile minerAddress]", ArgsUsage: "[inputFile]",
Flags: []cli.Flag{ Flags: []cli.Flag{
&CidBaseFlag, &CidBaseFlag,
}, },
@ -185,16 +190,11 @@ var clientCommPCmd = &cli.Command{
defer closer() defer closer()
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
if cctx.Args().Len() != 2 { if cctx.Args().Len() != 1 {
return fmt.Errorf("usage: commP <inputPath> <minerAddr>") return fmt.Errorf("usage: commP <inputPath>")
} }
miner, err := address.NewFromString(cctx.Args().Get(1)) ret, err := api.ClientCalcCommP(ctx, cctx.Args().Get(0))
if err != nil {
return err
}
ret, err := api.ClientCalcCommP(ctx, cctx.Args().Get(0), miner)
if err != nil { if err != nil {
return err return err
} }
@ -315,6 +315,10 @@ var clientDealCmd = &cli.Command{
Usage: "indicate that the deal counts towards verified client total", Usage: "indicate that the deal counts towards verified client total",
Value: false, Value: false,
}, },
&cli.StringFlag{
Name: "provider-collateral",
Usage: "specify the requested provider collateral the miner should put up",
},
&CidBaseFlag, &CidBaseFlag,
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
@ -355,6 +359,15 @@ var clientDealCmd = &cli.Command{
return err return err
} }
var provCol big.Int
if pcs := cctx.String("provider-collateral"); pcs != "" {
pc, err := big.FromString(pcs)
if err != nil {
return fmt.Errorf("failed to parse provider-collateral: %w", err)
}
provCol = pc
}
if abi.ChainEpoch(dur) < build.MinDealDuration { if abi.ChainEpoch(dur) < build.MinDealDuration {
return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration) return xerrors.Errorf("minimum deal duration is %d blocks", build.MinDealDuration)
} }
@ -419,14 +432,15 @@ var clientDealCmd = &cli.Command{
} }
proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{ proposal, err := api.ClientStartDeal(ctx, &lapi.StartDealParams{
Data: ref, Data: ref,
Wallet: a, Wallet: a,
Miner: miner, Miner: miner,
EpochPrice: types.BigInt(price), EpochPrice: types.BigInt(price),
MinBlocksDuration: uint64(dur), MinBlocksDuration: uint64(dur),
DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")), DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")),
FastRetrieval: cctx.Bool("fast-retrieval"), FastRetrieval: cctx.Bool("fast-retrieval"),
VerifiedDeal: isVerified, VerifiedDeal: isVerified,
ProviderCollateral: provCol,
}) })
if err != nil { if err != nil {
return err return err
@ -540,7 +554,7 @@ func interactiveDeal(cctx *cli.Context) error {
continue continue
} }
a, err := api.ClientQueryAsk(ctx, mi.PeerId, maddr) a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr)
if err != nil { if err != nil {
printErr(xerrors.Errorf("failed to query ask: %w", err)) printErr(xerrors.Errorf("failed to query ask: %w", err))
state = "miner" state = "miner"
@ -837,12 +851,33 @@ var clientRetrieveCmd = &cli.Command{
Path: cctx.Args().Get(1), Path: cctx.Args().Get(1),
IsCAR: cctx.Bool("car"), IsCAR: cctx.Bool("car"),
} }
if err := fapi.ClientRetrieve(ctx, offer.Order(payer), ref); err != nil { updates, err := fapi.ClientRetrieveWithEvents(ctx, offer.Order(payer), ref)
return xerrors.Errorf("Retrieval Failed: %w", err) if err != nil {
return xerrors.Errorf("error setting up retrieval: %w", err)
} }
fmt.Println("Success") for {
return nil select {
case evt, ok := <-updates:
if ok {
fmt.Printf("> Recv: %s, Paid %s, %s (%s)\n",
types.SizeStr(types.NewInt(evt.BytesReceived)),
types.FIL(evt.FundsSpent),
retrievalmarket.ClientEvents[evt.Event],
retrievalmarket.DealStatuses[evt.Status],
)
} else {
fmt.Println("Success")
return nil
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
}, },
} }
@ -895,11 +930,11 @@ var clientQueryAskCmd = &cli.Command{
return xerrors.Errorf("failed to get peerID for miner: %w", err) return xerrors.Errorf("failed to get peerID for miner: %w", err)
} }
if peer.ID(mi.PeerId) == peer.ID("SETME") { if *mi.PeerId == peer.ID("SETME") {
return fmt.Errorf("the miner hasn't initialized yet") return fmt.Errorf("the miner hasn't initialized yet")
} }
pid = peer.ID(mi.PeerId) pid = *mi.PeerId
} }
ask, err := api.ClientQueryAsk(ctx, pid, maddr) ask, err := api.ClientQueryAsk(ctx, pid, maddr)
@ -909,6 +944,7 @@ var clientQueryAskCmd = &cli.Command{
fmt.Printf("Ask: %s\n", maddr) fmt.Printf("Ask: %s\n", maddr)
fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Ask.Price)) fmt.Printf("Price per GiB: %s\n", types.FIL(ask.Ask.Price))
fmt.Printf("Verified Price per GiB: %s\n", types.FIL(ask.Ask.VerifiedPrice))
fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.Ask.MaxPieceSize)))) fmt.Printf("Max Piece size: %s\n", types.SizeStr(types.NewInt(uint64(ask.Ask.MaxPieceSize))))
size := cctx.Int64("size") size := cctx.Int64("size")
@ -961,6 +997,10 @@ var clientListDeals = &cli.Command{
return err return err
} }
sort.Slice(localDeals, func(i, j int) bool {
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
})
var deals []deal var deals []deal
for _, v := range localDeals { for _, v := range localDeals {
if v.DealID == 0 { if v.DealID == 0 {
@ -989,7 +1029,7 @@ var clientListDeals = &cli.Command{
if cctx.Bool("verbose") { if cctx.Bool("verbose") {
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
fmt.Fprintf(w, "DealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n") fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
for _, d := range deals { for _, d := range deals {
onChain := "N" onChain := "N"
if d.OnChainDealState.SectorStartEpoch != -1 { if d.OnChainDealState.SectorStartEpoch != -1 {
@ -1002,58 +1042,56 @@ var clientListDeals = &cli.Command{
} }
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message) fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
} }
return w.Flush() return w.Flush()
} else { }
w := tablewriter.New(tablewriter.Col("DealCid"),
tablewriter.Col("DealId"),
tablewriter.Col("Provider"),
tablewriter.Col("State"),
tablewriter.Col("On Chain?"),
tablewriter.Col("Slashed?"),
tablewriter.Col("PieceCID"),
tablewriter.Col("Size"),
tablewriter.Col("Price"),
tablewriter.Col("Duration"),
tablewriter.NewLineCol("Message"))
for _, d := range deals { w := tablewriter.New(tablewriter.Col("DealCid"),
propcid := d.LocalDeal.ProposalCid.String() tablewriter.Col("DealId"),
propcid = "..." + propcid[len(propcid)-8:] tablewriter.Col("Provider"),
tablewriter.Col("State"),
tablewriter.Col("On Chain?"),
tablewriter.Col("Slashed?"),
tablewriter.Col("PieceCID"),
tablewriter.Col("Size"),
tablewriter.Col("Price"),
tablewriter.Col("Duration"),
tablewriter.NewLineCol("Message"))
onChain := "N" for _, d := range deals {
if d.OnChainDealState.SectorStartEpoch != -1 { propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8)
onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
}
slashed := "N" onChain := "N"
if d.OnChainDealState.SlashEpoch != -1 { if d.OnChainDealState.SectorStartEpoch != -1 {
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch)
}
piece := d.LocalDeal.PieceCID.String()
piece = "..." + piece[len(piece)-8:]
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
w.Write(map[string]interface{}{
"DealCid": propcid,
"DealId": d.LocalDeal.DealID,
"Provider": d.LocalDeal.Provider,
"State": dealStateString(color, d.LocalDeal.State),
"On Chain?": onChain,
"Slashed?": slashed,
"PieceCID": piece,
"Size": types.SizeStr(types.NewInt(d.LocalDeal.Size)),
"Price": price,
"Duration": d.LocalDeal.Duration,
"Message": d.LocalDeal.Message,
})
} }
return w.Flush(os.Stdout) slashed := "N"
if d.OnChainDealState.SlashEpoch != -1 {
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
}
piece := ellipsis(d.LocalDeal.PieceCID.String(), 8)
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
w.Write(map[string]interface{}{
"DealCid": propcid,
"DealId": d.LocalDeal.DealID,
"Provider": d.LocalDeal.Provider,
"State": dealStateString(color, d.LocalDeal.State),
"On Chain?": onChain,
"Slashed?": slashed,
"PieceCID": piece,
"Size": types.SizeStr(types.NewInt(d.LocalDeal.Size)),
"Price": price,
"Duration": d.LocalDeal.Duration,
"Message": d.LocalDeal.Message,
})
} }
return w.Flush(os.Stdout)
}, },
} }
@ -1171,3 +1209,175 @@ var clientInfoCmd = &cli.Command{
return nil return nil
}, },
} }
var clientListTransfers = &cli.Command{
Name: "list-transfers",
Usage: "List ongoing data transfers for deals",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "color",
Usage: "use color in display output",
Value: true,
},
&cli.BoolFlag{
Name: "completed",
Usage: "show completed data transfers",
},
&cli.BoolFlag{
Name: "watch",
Usage: "watch deal updates in real-time, rather than a one time list",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
channels, err := api.ClientListDataTransfers(ctx)
if err != nil {
return err
}
completed := cctx.Bool("completed")
color := cctx.Bool("color")
watch := cctx.Bool("watch")
if watch {
channelUpdates, err := api.ClientDataTransferUpdates(ctx)
if err != nil {
return err
}
for {
tm.Clear() // Clear current screen
tm.MoveCursor(1, 1)
OutputDataTransferChannels(tm.Screen, channels, completed, color)
tm.Flush()
select {
case <-ctx.Done():
return nil
case channelUpdate := <-channelUpdates:
var found bool
for i, existing := range channels {
if existing.TransferID == channelUpdate.TransferID &&
existing.OtherPeer == channelUpdate.OtherPeer &&
existing.IsSender == channelUpdate.IsSender &&
existing.IsInitiator == channelUpdate.IsInitiator {
channels[i] = channelUpdate
found = true
break
}
}
if !found {
channels = append(channels, channelUpdate)
}
}
}
}
OutputDataTransferChannels(os.Stdout, channels, completed, color)
return nil
},
}
// OutputDataTransferChannels generates table output for a list of channels
func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChannel, completed bool, color bool) {
sort.Slice(channels, func(i, j int) bool {
return channels[i].TransferID < channels[j].TransferID
})
var receivingChannels, sendingChannels []lapi.DataTransferChannel
for _, channel := range channels {
if !completed && channel.Status == datatransfer.Completed {
continue
}
if channel.IsSender {
sendingChannels = append(sendingChannels, channel)
} else {
receivingChannels = append(receivingChannels, channel)
}
}
fmt.Fprintf(out, "Sending Channels\n\n")
w := tablewriter.New(tablewriter.Col("ID"),
tablewriter.Col("Status"),
tablewriter.Col("Sending To"),
tablewriter.Col("Root Cid"),
tablewriter.Col("Initiated?"),
tablewriter.Col("Transferred"),
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range sendingChannels {
w.Write(toChannelOutput(color, "Sending To", channel))
}
w.Flush(out) //nolint:errcheck
fmt.Fprintf(out, "\nReceiving Channels\n\n")
w = tablewriter.New(tablewriter.Col("ID"),
tablewriter.Col("Status"),
tablewriter.Col("Receiving From"),
tablewriter.Col("Root Cid"),
tablewriter.Col("Initiated?"),
tablewriter.Col("Transferred"),
tablewriter.Col("Voucher"),
tablewriter.NewLineCol("Message"))
for _, channel := range receivingChannels {
w.Write(toChannelOutput(color, "Receiving From", channel))
}
w.Flush(out) //nolint:errcheck
}
func channelStatusString(useColor bool, status datatransfer.Status) string {
s := datatransfer.Statuses[status]
if !useColor {
return s
}
switch status {
case datatransfer.Failed, datatransfer.Cancelled:
return color.RedString(s)
case datatransfer.Completed:
return color.GreenString(s)
default:
return s
}
}
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
rootCid := ellipsis(channel.BaseCID.String(), 8)
otherParty := ellipsis(channel.OtherPeer.String(), 8)
initiated := "N"
if channel.IsInitiator {
initiated = "Y"
}
voucher := channel.Voucher
if len(voucher) > 40 {
voucher = ellipsis(voucher, 37)
}
return map[string]interface{}{
"ID": channel.TransferID,
"Status": channelStatusString(useColor, channel.Status),
otherPartyColumn: otherParty,
"Root Cid": rootCid,
"Initiated?": initiated,
"Transferred": channel.Transferred,
"Voucher": voucher,
"Message": channel.Message,
}
}
func ellipsis(s string, length int) string {
if length > 0 && len(s) > length {
return "..." + s[len(s)-length:]
}
return s
}

View File

@ -12,7 +12,7 @@ import (
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir" "github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net" manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -26,7 +26,7 @@ import (
var log = logging.Logger("cli") var log = logging.Logger("cli")
const ( const (
metadataTraceConetxt = "traceContext" metadataTraceContext = "traceContext"
) )
// custom CLI error // custom CLI error
@ -67,6 +67,19 @@ func (a APIInfo) AuthHeader() http.Header {
return nil return nil
} }
// The flag passed on the command line with the listen address of the API
// server (only used by the tests)
func flagForAPI(t repo.RepoType) string {
switch t {
case repo.FullNode:
return "api"
case repo.StorageMiner:
return "miner-api"
default:
panic(fmt.Sprintf("Unknown repo type: %v", t))
}
}
func flagForRepo(t repo.RepoType) string { func flagForRepo(t repo.RepoType) string {
switch t { switch t {
case repo.FullNode: case repo.FullNode:
@ -102,6 +115,20 @@ func envForRepoDeprecation(t repo.RepoType) string {
} }
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) { func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
// Check if there was a flag passed with the listen address of the API
// server (only used by the tests)
apiFlag := flagForAPI(t)
if ctx.IsSet(apiFlag) {
strma := ctx.String(apiFlag)
strma = strings.TrimSpace(strma)
apima, err := multiaddr.NewMultiaddr(strma)
if err != nil {
return APIInfo{}, err
}
return APIInfo{Addr: apima}, nil
}
envKey := envForRepo(t) envKey := envForRepo(t)
env, ok := os.LookupEnv(envKey) env, ok := os.LookupEnv(envKey)
if !ok { if !ok {
@ -132,7 +159,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
p, err := homedir.Expand(ctx.String(repoFlag)) p, err := homedir.Expand(ctx.String(repoFlag))
if err != nil { if err != nil {
return APIInfo{}, xerrors.Errorf("cound not expand home dir (%s): %w", repoFlag, err) return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", repoFlag, err)
} }
r, err := repo.NewFS(p) r, err := repo.NewFS(p)
@ -186,7 +213,7 @@ func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
return nil, nil, err return nil, nil, err
} }
return client.NewCommonRPC(addr, headers) return client.NewCommonRPC(ctx.Context, addr, headers)
} }
func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) { func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) {
@ -195,20 +222,20 @@ func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error
return nil, nil, err return nil, nil, err
} }
return client.NewFullNodeRPC(addr, headers) return client.NewFullNodeRPC(ctx.Context, addr, headers)
} }
func GetStorageMinerAPI(ctx *cli.Context) (api.StorageMiner, jsonrpc.ClientCloser, error) { func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
addr, headers, err := GetRawAPI(ctx, repo.StorageMiner) addr, headers, err := GetRawAPI(ctx, repo.StorageMiner)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
return client.NewStorageMinerRPC(addr, headers) return client.NewStorageMinerRPC(ctx.Context, addr, headers, opts...)
} }
func DaemonContext(cctx *cli.Context) context.Context { func DaemonContext(cctx *cli.Context) context.Context {
if mtCtx, ok := cctx.App.Metadata[metadataTraceConetxt]; ok { if mtCtx, ok := cctx.App.Metadata[metadataTraceContext]; ok {
return mtCtx.(context.Context) return mtCtx.(context.Context)
} }
@ -238,6 +265,7 @@ var CommonCommands = []*cli.Command{
logCmd, logCmd,
waitApiCmd, waitApiCmd,
fetchParamCmd, fetchParamCmd,
pprofCmd,
VersionCmd, VersionCmd,
} }
@ -256,6 +284,7 @@ var Commands = []*cli.Command{
WithCategory("developer", fetchParamCmd), WithCategory("developer", fetchParamCmd),
WithCategory("network", netCmd), WithCategory("network", netCmd),
WithCategory("network", syncCmd), WithCategory("network", syncCmd),
pprofCmd,
VersionCmd, VersionCmd,
} }
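Note that the new api flag is intended only for the test harness: the mock CLI added later in this diff (see mockCLIClient.runCmd in cli/paych_test.go below) prepends "--api=<node listen multiaddr>" to every invocation, so GetAPIInfo dials the node under test directly instead of resolving the address from the repo or environment.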

View File

@ -39,7 +39,7 @@ func RunApp(app *cli.App) {
} }
var phe *PrintHelpErr var phe *PrintHelpErr
if xerrors.As(err, &phe) { if xerrors.As(err, &phe) {
cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name) _ = cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
} }
os.Exit(1) os.Exit(1)
} }

View File

@ -60,7 +60,8 @@ var logSetLevel = &cli.Command{
Environment Variables: Environment Variables:
GOLOG_LOG_LEVEL - Default log level for all log systems GOLOG_LOG_LEVEL - Default log level for all log systems
GOLOG_LOG_FMT - Change output log format (json, nocolor) GOLOG_LOG_FMT - Change output log format (json, nocolor)
GOLOG_FILE - Write logs to file in addition to stderr GOLOG_FILE - Write logs to file
GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr
`, `,
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.StringSliceFlag{ &cli.StringSliceFlag{

View File

@ -20,10 +20,12 @@ var mpoolCmd = &cli.Command{
Usage: "Manage message pool", Usage: "Manage message pool",
Subcommands: []*cli.Command{ Subcommands: []*cli.Command{
mpoolPending, mpoolPending,
mpoolClear,
mpoolSub, mpoolSub,
mpoolStat, mpoolStat,
mpoolReplaceCmd, mpoolReplaceCmd,
mpoolFindCmd, mpoolFindCmd,
mpoolConfig,
}, },
} }
@ -82,6 +84,39 @@ var mpoolPending = &cli.Command{
}, },
} }
var mpoolClear = &cli.Command{
Name: "clear",
Usage: "Clear all pending messages from the mpool (USE WITH CARE)",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "local",
Usage: "also clear local messages",
},
&cli.BoolFlag{
Name: "really-do-it",
Usage: "must be specified for the action to take effect",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
really := cctx.Bool("really-do-it")
if !really {
//nolint:golint
return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
}
local := cctx.Bool("local")
ctx := ReqContext(cctx)
return api.MpoolClear(ctx, local)
},
}
var mpoolSub = &cli.Command{ var mpoolSub = &cli.Command{
Name: "sub", Name: "sub",
Usage: "Subscribe to mpool changes", Usage: "Subscribe to mpool changes",
@ -312,7 +347,7 @@ var mpoolReplaceCmd = &cli.Command{
if err != nil { if err != nil {
return fmt.Errorf("parsing gas-premium: %w", err) return fmt.Errorf("parsing gas-premium: %w", err)
} }
// TODO: estiamte fee cap here // TODO: estimate fee cap here
msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap")) msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
if err != nil { if err != nil {
return fmt.Errorf("parsing gas-feecap: %w", err) return fmt.Errorf("parsing gas-feecap: %w", err)
@ -415,3 +450,48 @@ var mpoolFindCmd = &cli.Command{
return nil return nil
}, },
} }
var mpoolConfig = &cli.Command{
Name: "config",
Usage: "get or set current mpool configuration",
ArgsUsage: "[new-config]",
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() > 1 {
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
if cctx.Args().Len() == 0 {
cfg, err := api.MpoolGetConfig(ctx)
if err != nil {
return err
}
bytes, err := json.Marshal(cfg)
if err != nil {
return err
}
fmt.Println(string(bytes))
} else {
cfg := new(types.MpoolConfig)
bytes := []byte(cctx.Args().Get(0))
err := json.Unmarshal(bytes, cfg)
if err != nil {
return err
}
return api.MpoolSetConfig(ctx, cfg)
}
return nil
},
}
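Assuming these subcommands are wired into the node CLI as shown, typical invocations would be along the lines of "mpool clear --really-do-it" (optionally with --local to also drop local messages), "mpool config" to print the current configuration as JSON, and "mpool config '<json>'" to replace it; the clear command deliberately refuses to act unless --really-do-it is given.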

View File

@ -12,6 +12,7 @@ import (
"text/tabwriter" "text/tabwriter"
"github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -231,10 +232,10 @@ var msigInspectCmd = &cli.Command{
}) })
w := tabwriter.NewWriter(os.Stdout, 8, 4, 0, ' ', 0) w := tabwriter.NewWriter(os.Stdout, 8, 4, 0, ' ', 0)
fmt.Fprintf(w, "ID\tState\tTo\tValue\tMethod\tParams\n") fmt.Fprintf(w, "ID\tState\tApprovals\tTo\tValue\tMethod\tParams\n")
for _, txid := range txids { for _, txid := range txids {
tx := pending[txid] tx := pending[txid]
fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%d\t%x\n", txid, state(tx), tx.To, types.FIL(tx.Value), tx.Method, tx.Params) fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%d\t%x\n", txid, state(tx), len(tx.Approved), tx.To, types.FIL(tx.Value), tx.Method, tx.Params)
} }
if err := w.Flush(); err != nil { if err := w.Flush(); err != nil {
return xerrors.Errorf("flushing output: %+v", err) return xerrors.Errorf("flushing output: %+v", err)
@ -355,6 +356,15 @@ var msigProposeCmd = &cli.Command{
from = defaddr from = defaddr
} }
act, err := api.StateGetActor(ctx, msig, types.EmptyTSK)
if err != nil {
return fmt.Errorf("failed to look up multisig %s: %w", msig, err)
}
if act.Code != builtin.MultisigActorCodeID {
return fmt.Errorf("actor %s is not a multisig actor", msig)
}
msgCid, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params) msgCid, err := api.MsigPropose(ctx, msig, dest, types.BigInt(value), from, method, params)
if err != nil { if err != nil {
return err return err

View File

@ -24,6 +24,7 @@ var netCmd = &cli.Command{
NetId, NetId,
netFindPeer, netFindPeer,
netScores, netScores,
NetReachability,
}, },
} }
@ -179,7 +180,7 @@ var netFindPeer = &cli.Command{
return nil return nil
} }
pid, err := peer.IDB58Decode(cctx.Args().First()) pid, err := peer.Decode(cctx.Args().First())
if err != nil { if err != nil {
return err return err
} }
@ -202,3 +203,28 @@ var netFindPeer = &cli.Command{
return nil return nil
}, },
} }
var NetReachability = &cli.Command{
Name: "reachability",
Usage: "Print information about reachability from the internet",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
i, err := api.NetAutoNatStatus(ctx)
if err != nil {
return err
}
fmt.Println("AutoNAT status: ", i.Reachability.String())
if i.PublicAddr != "" {
fmt.Println("Public address: ", i.PublicAddr)
}
return nil
},
}
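The new command relies only on the common API (it is built on GetAPI rather than GetFullNodeAPI), which is presumably why it is exported; it prints the node's AutoNAT reachability status and, when one is known, its advertised public address.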

View File

@ -4,10 +4,13 @@ import (
"bytes" "bytes"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io"
"sort"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/paychmgr"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/specs-actors/actors/builtin/paych" "github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -66,12 +69,12 @@ var paychGetCmd = &cli.Command{
} }
// Wait for the message to be confirmed // Wait for the message to be confirmed
chAddr, err := api.PaychGetWaitReady(ctx, info.ChannelMessage) chAddr, err := api.PaychGetWaitReady(ctx, info.WaitSentinel)
if err != nil { if err != nil {
return err return err
} }
fmt.Println(chAddr) fmt.Fprintln(cctx.App.Writer, chAddr)
return nil return nil
}, },
} }
@ -94,7 +97,7 @@ var paychListCmd = &cli.Command{
} }
for _, v := range chs { for _, v := range chs {
fmt.Println(v.String()) fmt.Fprintln(cctx.App.Writer, v.String())
} }
return nil return nil
}, },
@ -135,7 +138,7 @@ var paychSettleCmd = &cli.Command{
return fmt.Errorf("settle message execution failed (exit code %d)", mwait.Receipt.ExitCode) return fmt.Errorf("settle message execution failed (exit code %d)", mwait.Receipt.ExitCode)
} }
fmt.Printf("Settled channel %s\n", ch) fmt.Fprintf(cctx.App.Writer, "Settled channel %s\n", ch)
return nil return nil
}, },
} }
@ -175,7 +178,7 @@ var paychCloseCmd = &cli.Command{
return fmt.Errorf("collect message execution failed (exit code %d)", mwait.Receipt.ExitCode) return fmt.Errorf("collect message execution failed (exit code %d)", mwait.Receipt.ExitCode)
} }
fmt.Printf("Collected funds for channel %s\n", ch) fmt.Fprintf(cctx.App.Writer, "Collected funds for channel %s\n", ch)
return nil return nil
}, },
} }
@ -239,7 +242,7 @@ var paychVoucherCreateCmd = &cli.Command{
return err return err
} }
fmt.Println(enc) fmt.Fprintln(cctx.App.Writer, enc)
return nil return nil
}, },
} }
@ -275,7 +278,7 @@ var paychVoucherCheckCmd = &cli.Command{
return err return err
} }
fmt.Println("voucher is valid") fmt.Fprintln(cctx.App.Writer, "voucher is valid")
return nil return nil
}, },
} }
@ -323,7 +326,7 @@ var paychVoucherListCmd = &cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
&cli.BoolFlag{ &cli.BoolFlag{
Name: "export", Name: "export",
Usage: "Print export strings", Usage: "Print voucher as serialized string",
}, },
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
@ -349,16 +352,11 @@ var paychVoucherListCmd = &cli.Command{
return err return err
} }
for _, v := range vouchers { for _, v := range sortVouchers(vouchers) {
if cctx.Bool("export") { export := cctx.Bool("export")
enc, err := EncodedString(v) err := outputVoucher(cctx.App.Writer, v, export)
if err != nil { if err != nil {
return err return err
}
fmt.Printf("Lane %d, Nonce %d: %s; %s\n", v.Lane, v.Nonce, v.Amount.String(), enc)
} else {
fmt.Printf("Lane %d, Nonce %d: %s\n", v.Lane, v.Nonce, v.Amount.String())
} }
} }
@ -368,8 +366,14 @@ var paychVoucherListCmd = &cli.Command{
var paychVoucherBestSpendableCmd = &cli.Command{ var paychVoucherBestSpendableCmd = &cli.Command{
Name: "best-spendable", Name: "best-spendable",
Usage: "Print voucher with highest value that is currently spendable", Usage: "Print vouchers with highest value that is currently spendable for each lane",
ArgsUsage: "[channelAddress]", ArgsUsage: "[channelAddress]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "export",
Usage: "Print voucher as serialized string",
},
},
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 1 { if cctx.Args().Len() != 1 {
return ShowHelp(cctx, fmt.Errorf("must pass payment channel address")) return ShowHelp(cctx, fmt.Errorf("must pass payment channel address"))
@ -388,37 +392,53 @@ var paychVoucherBestSpendableCmd = &cli.Command{
ctx := ReqContext(cctx) ctx := ReqContext(cctx)
vouchers, err := api.PaychVoucherList(ctx, ch) vouchersByLane, err := paychmgr.BestSpendableByLane(ctx, api, ch)
if err != nil { if err != nil {
return err return err
} }
var best *paych.SignedVoucher var vouchers []*paych.SignedVoucher
for _, v := range vouchers { for _, vchr := range vouchersByLane {
spendable, err := api.PaychVoucherCheckSpendable(ctx, ch, v, nil, nil) vouchers = append(vouchers, vchr)
}
for _, best := range sortVouchers(vouchers) {
export := cctx.Bool("export")
err := outputVoucher(cctx.App.Writer, best, export)
if err != nil { if err != nil {
return err return err
} }
if spendable {
if best == nil || v.Amount.GreaterThan(best.Amount) {
best = v
}
}
} }
if best == nil { return nil
return fmt.Errorf("No spendable vouchers for that channel") },
} }
enc, err := EncodedString(best) func sortVouchers(vouchers []*paych.SignedVoucher) []*paych.SignedVoucher {
sort.Slice(vouchers, func(i, j int) bool {
if vouchers[i].Lane == vouchers[j].Lane {
return vouchers[i].Nonce < vouchers[j].Nonce
}
return vouchers[i].Lane < vouchers[j].Lane
})
return vouchers
}
func outputVoucher(w io.Writer, v *paych.SignedVoucher, export bool) error {
var enc string
if export {
var err error
enc, err = EncodedString(v)
if err != nil { if err != nil {
return err return err
} }
}
fmt.Println(enc) fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, v.Amount.String())
fmt.Printf("Amount: %s\n", best.Amount) if export {
return nil fmt.Fprintf(w, "; %s", enc)
}, }
fmt.Fprintln(w)
return nil
} }
var paychVoucherSubmitCmd = &cli.Command{ var paychVoucherSubmitCmd = &cli.Command{
@ -462,7 +482,7 @@ var paychVoucherSubmitCmd = &cli.Command{
return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode) return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode)
} }
fmt.Println("channel updated successfully") fmt.Fprintln(cctx.App.Writer, "channel updated successfully")
return nil return nil
}, },
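With these changes, vouchers are always printed sorted by lane and then nonce, one per line in the form "Lane <lane>, Nonce <nonce>: <amount>", with "; <serialized voucher>" appended when --export is set; the test below (checkVoucherOutput) parses exactly this layout by splitting each line on the semicolon.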

330
cli/paych_test.go Normal file
View File

@ -0,0 +1,330 @@
package cli
import (
"bytes"
"context"
"flag"
"fmt"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/filecoin-project/specs-actors/actors/abi/big"
saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/api/apibstore"
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/api/test"
"github.com/filecoin-project/lotus/chain/wallet"
builder "github.com/filecoin-project/lotus/node/test"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)
func init() {
power.ConsensusMinerMinPower = big.NewInt(2048)
saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg2KiBV1: {},
}
verifreg.MinVerifiedDealSize = big.NewInt(256)
}
// TestPaymentChannels does a basic test to exercise the payment channel CLI
// commands
func TestPaymentChannels(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
paymentReceiver := nodes[1]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
// creator: paych get <creator> <receiver> <amount>
channelAmt := "100000"
cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
chstr := creatorCLI.runCmd(paychGetCmd, cmd)
chAddr, err := address.NewFromString(chstr)
require.NoError(t, err)
// creator: paych voucher create <channel> <amount>
voucherAmt := 100
vamt := strconv.Itoa(voucherAmt)
cmd = []string{chAddr.String(), vamt}
voucher := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
// receiver: paych voucher add <channel> <voucher>
cmd = []string{chAddr.String(), voucher}
receiverCLI.runCmd(paychVoucherAddCmd, cmd)
// creator: paych settle <channel>
cmd = []string{chAddr.String()}
creatorCLI.runCmd(paychSettleCmd, cmd)
// Wait for the chain to reach the settle height
chState := getPaychState(ctx, t, paymentReceiver, chAddr)
waitForHeight(ctx, t, paymentReceiver, chState.SettlingAt)
// receiver: paych collect <channel>
cmd = []string{chAddr.String()}
receiverCLI.runCmd(paychCloseCmd, cmd)
}
type voucherSpec struct {
serialized string
amt int
lane int
}
// TestPaymentChannelVouchers does a basic test to exercise some payment
// channel voucher commands
func TestPaymentChannelVouchers(t *testing.T) {
_ = os.Setenv("BELLMAN_NO_GPU", "1")
blocktime := 5 * time.Millisecond
ctx := context.Background()
nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
paymentCreator := nodes[0]
creatorAddr := addrs[0]
receiverAddr := addrs[1]
// Create mock CLI
mockCLI := newMockCLI(t)
creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
// creator: paych get <creator> <receiver> <amount>
channelAmt := "100000"
cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
chstr := creatorCLI.runCmd(paychGetCmd, cmd)
chAddr, err := address.NewFromString(chstr)
require.NoError(t, err)
var vouchers []voucherSpec
// creator: paych voucher create <channel> <amount>
// Note: implied --lane=0
voucherAmt1 := 100
vamt1 := strconv.Itoa(voucherAmt1)
cmd = []string{chAddr.String(), vamt1}
voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
vouchers = append(vouchers, voucherSpec{serialized: voucher1, lane: 0, amt: voucherAmt1})
// creator: paych voucher create <channel> <amount> --lane=5
lane5 := "--lane=5"
voucherAmt2 := 50
vamt2 := strconv.Itoa(voucherAmt2)
cmd = []string{lane5, chAddr.String(), vamt2}
voucher2 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
vouchers = append(vouchers, voucherSpec{serialized: voucher2, lane: 5, amt: voucherAmt2})
// creator: paych voucher create <channel> <amount> --lane=5
voucherAmt3 := 70
vamt3 := strconv.Itoa(voucherAmt3)
cmd = []string{lane5, chAddr.String(), vamt3}
voucher3 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
vouchers = append(vouchers, voucherSpec{serialized: voucher3, lane: 5, amt: voucherAmt3})
// creator: paych voucher list <channel> --export
cmd = []string{"--export", chAddr.String()}
list := creatorCLI.runCmd(paychVoucherListCmd, cmd)
// Check that voucher list output is correct
checkVoucherOutput(t, list, vouchers)
// creator: paych voucher best-spendable <channel>
cmd = []string{"--export", chAddr.String()}
bestSpendable := creatorCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
// Check that best spendable output is correct
bestVouchers := []voucherSpec{
{serialized: voucher1, lane: 0, amt: voucherAmt1},
{serialized: voucher3, lane: 5, amt: voucherAmt3},
}
checkVoucherOutput(t, bestSpendable, bestVouchers)
}
func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
lines := strings.Split(list, "\n")
listVouchers := make(map[string]string)
for _, line := range lines {
parts := strings.Split(line, ";")
serialized := strings.TrimSpace(parts[1])
listVouchers[serialized] = strings.TrimSpace(parts[0])
}
for _, vchr := range vouchers {
res, ok := listVouchers[vchr.serialized]
require.True(t, ok)
require.Regexp(t, fmt.Sprintf("Lane %d", vchr.lane), res)
require.Regexp(t, fmt.Sprintf("%d", vchr.amt), res)
}
}
func startTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
n, sn := builder.RPCMockSbBuilder(t, 2, test.OneMiner)
paymentCreator := n[0]
paymentReceiver := n[1]
miner := sn[0]
// Get everyone connected
addrs, err := paymentCreator.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
// Start mining blocks
bm := test.NewBlockMiner(ctx, t, miner, blocktime)
bm.MineBlocks()
// Send some funds to register the receiver
receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1"))
if err != nil {
t.Fatal(err)
}
test.SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
// Get the creator's address
creatorAddr, err := paymentCreator.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
// Create mock CLI
return n, []address.Address{creatorAddr, receiverAddr}
}
type mockCLI struct {
t *testing.T
cctx *cli.Context
out *bytes.Buffer
}
func newMockCLI(t *testing.T) *mockCLI {
// Create a CLI App with an --api flag so that we can specify which node
// the command should be executed against
app := cli.NewApp()
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "api",
Hidden: true,
},
}
var out bytes.Buffer
app.Writer = &out
app.Setup()
cctx := cli.NewContext(app, &flag.FlagSet{}, nil)
return &mockCLI{t: t, cctx: cctx, out: &out}
}
func (c *mockCLI) client(addr multiaddr.Multiaddr) *mockCLIClient {
return &mockCLIClient{t: c.t, addr: addr, cctx: c.cctx, out: c.out}
}
// mockCLIClient runs commands against a particular node
type mockCLIClient struct {
t *testing.T
addr multiaddr.Multiaddr
cctx *cli.Context
out *bytes.Buffer
}
func (c *mockCLIClient) runCmd(cmd *cli.Command, input []string) string {
// prepend --api=<node api listener address>
apiFlag := "--api=" + c.addr.String()
input = append([]string{apiFlag}, input...)
fs := c.flagSet(cmd)
err := fs.Parse(input)
require.NoError(c.t, err)
err = cmd.Action(cli.NewContext(c.cctx.App, fs, c.cctx))
require.NoError(c.t, err)
// Get the output
str := strings.TrimSpace(c.out.String())
c.out.Reset()
return str
}
func (c *mockCLIClient) flagSet(cmd *cli.Command) *flag.FlagSet {
// Apply app level flags (so we can process --api flag)
fs := &flag.FlagSet{}
for _, f := range c.cctx.App.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
// Apply command level flags
for _, f := range cmd.Flags {
err := f.Apply(fs)
if err != nil {
c.t.Fatal(err)
}
}
return fs
}
// waitForHeight waits for the node to reach the given chain epoch
func waitForHeight(ctx context.Context, t *testing.T, node test.TestNode, height abi.ChainEpoch) {
atHeight := make(chan struct{})
chainEvents := events.NewEvents(ctx, node)
err := chainEvents.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
close(atHeight)
return nil
}, nil, 1, height)
if err != nil {
t.Fatal(err)
}
select {
case <-atHeight:
case <-ctx.Done():
}
}
// getPaychState gets the state of the payment channel with the given address
func getPaychState(ctx context.Context, t *testing.T, node test.TestNode, chAddr address.Address) paych.State {
act, err := node.StateGetActor(ctx, chAddr, types.EmptyTSK)
require.NoError(t, err)
store := cbor.NewCborStore(apibstore.NewAPIBlockstore(node))
var chState paych.State
err = store.Get(ctx, act.Head, &chState)
require.NoError(t, err)
return chState
}
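Assuming the standard Go toolchain, the new tests can be run with something like "go test ./cli/ -run TestPaymentChannel -v"; they build a two-node-plus-miner network in process via RPCMockSbBuilder with a 5 ms block time, and set BELLMAN_NO_GPU themselves, so no GPU or external daemon is required.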

Some files were not shown because too many files have changed in this diff