Merge branch 'master' of github.com:filecoin-project/lotus into extratsload

commit 7f3ba0e6eb
@@ -113,7 +113,7 @@ jobs:
   test: &test
     description: |
       Run tests with gotestsum.
-    parameters:
+    parameters: &test-params
       executor:
         type: executor
         default: golang
@@ -161,6 +161,7 @@ jobs:
           name: go test
           environment:
             LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
+            SKIP_CONFORMANCE: "1"
           command: |
             mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
             mkdir -p /tmp/test-artifacts
@@ -191,6 +192,63 @@ jobs:
     <<: *test
   test-window-post:
     <<: *test
+  test-conformance:
+    description: |
+      Run tests using a corpus of interoperable test vectors for Filecoin
+      implementations to test their correctness and compliance with the Filecoin
+      specifications.
+    parameters:
+      <<: *test-params
+      vectors-branch:
+        type: string
+        default: ""
+        description: |
+          Branch on github.com/filecoin-project/test-vectors to checkout and
+          test with. If empty (the default) the commit defined by the git
+          submodule is used.
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run:
+          command: make deps lotus
+          no_output_timeout: 30m
+      - download-params
+      - when:
+          condition:
+            not:
+              equal: [ "", << parameters.vectors-branch >> ]
+          steps:
+            - run:
+                name: checkout vectors branch
+                command: |
+                  cd extern/test-vectors
+                  git fetch
+                  git checkout origin/<< parameters.vectors-branch >>
+            - run:
+                name: go get vectors branch
+                command: go get github.com/filecoin-project/test-vectors@<< parameters.vectors-branch >>
+      - go/install-gotestsum:
+          gobin: $HOME/.local/bin
+          version: 0.5.2
+      - run:
+          name: go test
+          environment:
+            SKIP_CONFORMANCE: "0"
+          command: |
+            mkdir -p /tmp/test-reports
+            mkdir -p /tmp/test-artifacts
+            gotestsum \
+              --format pkgname-and-test-fails \
+              --junitfile /tmp/test-reports/junit.xml \
+              -- \
+              -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
+            go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
+          no_output_timeout: 30m
+      - store_test_results:
+          path: /tmp/test-reports
+      - store_artifacts:
+          path: /tmp/test-artifacts/conformance-coverage.html
+
   build-macos:
     description: build darwin lotus binary
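For contributors who want to reproduce the new `test-conformance` job outside CI, the commands below are a minimal local sketch assembled from the steps above. It assumes the `extern/test-vectors` submodule is initialized, proof parameters are already downloaded, and `gotestsum` is on PATH; the `/tmp` paths simply mirror the CI configuration.

```sh
# Sketch: run the conformance suite locally, mirroring the CI job above.
git submodule update --init extern/test-vectors
make deps lotus

mkdir -p /tmp/test-reports /tmp/test-artifacts
SKIP_CONFORMANCE=0 gotestsum \
  --format pkgname-and-test-fails \
  --junitfile /tmp/test-reports/junit.xml \
  -- \
  -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... \
  -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
```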
@@ -252,6 +310,27 @@ jobs:
       - run:
           command: "! go fmt ./... 2>&1 | read"
+
+  cbor-gen-check:
+    executor: golang
+    steps:
+      - install-deps
+      - prepare
+      - run: make deps
+      - run: go install golang.org/x/tools/cmd/goimports
+      - run: go install github.com/hannahhoward/cbor-gen-for
+      - run: go generate ./...
+      - run: git --no-pager diff
+      - run: git --no-pager diff --quiet
+
+  docs-check:
+    executor: golang
+    steps:
+      - install-deps
+      - prepare
+      - run: make docsgen
+      - run: git --no-pager diff
+      - run: git --no-pager diff --quiet
+
   lint: &lint
     description: |
       Run golangci-lint.
@@ -288,9 +367,6 @@ jobs:
       command: |
        $HOME/.local/bin/golangci-lint run -v --timeout 2m \
          --concurrency << parameters.concurrency >> << parameters.args >>
-  lint-changes:
-    <<: *lint
-
   lint-all:
     <<: *lint

@@ -319,10 +395,11 @@ workflows:
   version: 2.1
   ci:
     jobs:
-      - lint-changes:
-          args: "--new-from-rev origin/next"
+      - lint-all
       - mod-tidy-check
       - gofmt
+      - cbor-gen-check
+      - docs-check
       - test:
          codecov-upload: true
          test-suite-name: full
@@ -337,6 +414,14 @@ workflows:
              tags:
                only:
                  - /^v\d+\.\d+\.\d+$/
+      - test-conformance:
+          test-suite-name: conformance
+          packages: "./conformance"
+      - test-conformance:
+          name: test-conformance-bleeding-edge
+          test-suite-name: conformance-bleeding-edge
+          packages: "./conformance"
+          vectors-branch: master
       - build-debug
       - build-all:
          requires:
.github/labels.yml (vendored): 10 changed lines
@@ -26,11 +26,14 @@
   color: 00A4E2
   description: "Area: Chain/VM"
 - name: area/chain/sync
-  color: 00A4E2
+  color: 00A4E4
   description: "Area: Chain/Sync"
 - name: area/chain/misc
-  color: 00A4E2
+  color: 00A4E6
   description: "Area: Chain/Misc"
+- name: area/markets
+  color: 00A4E8
+  description: "Area: Markets"
 - name: area/sealing/fsm
   color: 0bb1ed
   description: "Area: Sealing/FSM"
@@ -149,6 +152,9 @@
 - name: impact/test-flakiness
   color: DDE1E4
   description: "Impact: Test Flakiness"
+- name: impact/consensus
+  color: b20014
+  description: "Impact: Consensus"

 ###
 ### Topics

.gitmodules (vendored): 3 changed lines
@@ -5,3 +5,6 @@
 [submodule "extern/serialization-vectors"]
 	path = extern/serialization-vectors
 	url = https://github.com/filecoin-project/serialization-vectors
+[submodule "extern/test-vectors"]
+	path = extern/test-vectors
+	url = https://github.com/filecoin-project/test-vectors.git

@@ -23,6 +23,14 @@ issues:
     - "Potential file inclusion via variable"
     - "should have( a package)? comment"
     - "Error return value of `logging.SetLogLevel` is not checked"
+    - "comment on exported"
+    - "(func|method) \\w+ should be \\w+"
+    - "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
+    - "(G306|G301|G307|G108|G302|G204|G104)"
+    - "don't use ALL_CAPS in Go names"
+    - "string .* has .* occurrences, make it a constant"
+    - "a blank import should be only in a main or test package, or have a comment justifying it"
+    - "package comment should be of the form"

   exclude-use-default: false
   exclude-rules:
@@ -46,6 +54,19 @@ issues:
       linters:
         - gosec
+
+    - path: chain/vectors/gen/.*
+      linters:
+        - gosec
+
+    - path: cmd/lotus-bench/.*
+      linters:
+        - gosec
+
+    - path: api/test/.*
+      text: "context.Context should be the first parameter"
+      linters:
+        - golint

 linters-settings:
   goconst:
     min-occurrences: 6

CHANGELOG.md: 241 changed lines
@@ -1,6 +1,243 @@
-# lotus changelog
+# Lotus changelog

-## 0.1.0 / 2019-12-11
+# 0.5.7 / 2020-08-31
+
+This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic.
+
+## Changes
+
+- Rebuild unsealed infos on miner restart (https://github.com/filecoin-project/lotus/pull/3401)
+- CLI to attach storage paths to workers (https://github.com/filecoin-project/lotus/pull/3405)
+- Do not select negative performing message chains for inclusion (https://github.com/filecoin-project/lotus/pull/3392)
+- Remove a redundant error-check (https://github.com/filecoin-project/lotus/pull/3421)
+- Correctly move unsealed sectors in `FinalizeSectors` (https://github.com/filecoin-project/lotus/pull/3424)
+- Improve worker selection logic (https://github.com/filecoin-project/lotus/pull/3425)
+- Don't use context to close bitswap (https://github.com/filecoin-project/lotus/pull/3430)
+- Correctly estimate gas premium when there is only one message on chain (https://github.com/filecoin-project/lotus/pull/3428)
+
+# 0.5.6 / 2020-08-29
+
+Hotfix release that fixes a panic in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3389).
+
+# 0.5.5
+
+This patch release introduces a large number of improvements to the sealing process.
+It also updates go-fil-markets to
+[version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8),
+and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5).
+
+#### Downstream upgrades
+
+- Upgrades markets to v0.5.8 (https://github.com/filecoin-project/lotus/pull/3384)
+- Upgrades go-libp2p-pubsub to v0.3.5 (https://github.com/filecoin-project/lotus/pull/3305)
+
+#### Sector sealing
+
+- The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350.
+
+  - Allow `lotus-miner sectors remove` to remove a sector in any state.
+  - Create a separate state in the storage FSM dedicated to submitting the Commit message.
+  - Recovery for when the Deal IDs of deals in a sector get changed in a reorg.
+  - Auto-retry sending Precommit and Commit messages if they run out of gas
+  - Auto-retry sector remove tasks when they fail
+  - Compact worker windows, and allow their tasks to be executed in any order
+
+- Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323)
+
+#### Message Pool
+
+- Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313)
+
+#### Chainwatch
+
+- Add more power and reward metrics (https://github.com/filecoin-project/lotus/pull/3367)
+- Fix raciness in sector deal table (https://github.com/filecoin-project/lotus/pull/3275)
+- Parallelize miner processing (https://github.com/filecoin-project/lotus/pull/3380)
+- Accept Lotus API and token (https://github.com/filecoin-project/lotus/pull/3337)
+
+# 0.5.4
+
+A patch release, containing a few nice bugfixes and improvements:
+
+- Fix parsing of peer ID in `lotus-miner actor set-peer-id` (@whyrusleeping)
+- Update dependencies, fixing several bugs (@Stebalien)
+- Fix remaining linter warnings (@Stebalien)
+- Use safe string truncation (@Ingar)
+- Allow tweaking of blocksync message window size (@whyrusleeping)
+- Add some additional gas stats to metrics (@Kubuxu)
+- Fix an edge case bug in message selection, add many tests (@vyzo)
+
+# 0.5.3
+
+Yet another hotfix release.
+A lesson for readers: having people who have been awake for 12+ hours review
+your hotfix PR is not a good idea. Find someone who has slept recently enough
+to give you good code review, otherwise you'll end up quickly bumping
+versions again.
+
+- Fixed a bug in the mempool that was introduced in v0.5.2
+
+# 0.5.2 / 2020-08-24
+
+This is a hotfix release.
+
+- Fix message selection to not include messages that are invalid for block
+  inclusion.
+- Improve SelectMessage handling of the case where the message pool's tipset
+  differs from our mining base.
+
+# 0.5.1 / 2020-08-24
+
+The Space Race release!
+This release contains the genesis car file and bootstrap peers for the space
+race network.
+
+Additionally, we included two small fixes to genesis creation:
+- Randomize ticket value in genesis generation
+- Correctly set t099 (burnt funds actor) to have valid account actor state
+
+# 0.5.0 / 2020-08-20
+
+This version of Lotus will be used for the incentivized testnet Space Race competition,
+and can be considered mainnet-ready code. It includes some protocol
+changes, upgrades of core dependencies, and various bugfixes and UX/performance improvements.
+
+## Highlights
+
+Among the highlights included in this release are:
+
+- Gas changes: We implemented EIP-1559 and introduced real gas values.
+- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals,
+  and the packing of multiple deals into a single sector.
+- Renamed features: We renamed some of the binaries, environment variables, and default
+  paths associated with a Lotus node.
+
+### Gas changes
+
+We made some significant changes to the mechanics of gas in this release.
+
+#### Network fee
+
+We implemented something similar to
+[Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md).
+The `Message` structure had three changes:
+- The `GasPrice` field has been removed
+- A new `GasFeeCap` field has been added, which controls the maximum cost
+  the sender incurs for the message
+- A new `GasPremium` field has been added, which controls the reward a miner
+  earns for including the message
+
+A sender will never be charged more than `GasFeeCap * GasLimit`.
+A miner will typically earn `GasPremium * GasLimit` as a reward.
+
+The `BlockHeader` structure has one new field, called `ParentBaseFee`.
+Informally speaking, the `ParentBaseFee`
+is increased when blocks are densely packed with messages, and decreased otherwise.
+
+The `ParentBaseFee` is used when calculating how much a sender burns when executing a message. _Burning_ simply refers to sending attoFIL to a dedicated, unreachable account.
+A message causes `ParentBaseFee * GasUsed` attoFIL to be burnt.
+
|
||||||
|
|
||||||
|
This release also includes our first "real" gas costs for primitive operations.
|
||||||
|
The costs were designed to account for both the _time_ that message execution takes,
|
||||||
|
as well as the _space_ a message adds to the state tree.
|
||||||
|
|
||||||
|
## Deal-making changes
|
||||||
|
|
||||||
|
There are three key changes to the deal-making process.
|
||||||
|
|
||||||
|
#### Committed Capacity sectors
|
||||||
|
|
||||||
|
Miners can now pledge "Committed Capacity" (CC) sectors, which are explicitly
|
||||||
|
stated as containing junk data, and must not include any deals. Miners can do this
|
||||||
|
to increase their storage power, and win block rewards from this pledged storage.
|
||||||
|
|
||||||
|
They can mark these sectors as "upgradable" with `lotus-miner sectors mark-for-upgrade`.
|
||||||
|
If the miner receives and accepts one or more storage deals, the sector that includes
|
||||||
|
those deals will _replace_ the CC sector. This is intended to maximize the amount of useful
|
||||||
|
storage on the Filecoin network.
|
||||||
|
|
||||||
|
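As a rough usage sketch of the flow described above: only `mark-for-upgrade` is quoted in the notes, so the other subcommand names here (`pledge`, `list`) and the sector number are assumptions about the `lotus-miner` CLI of this era rather than documented behaviour.

```sh
# Sketch of the Committed Capacity flow described above (subcommand names assumed).
lotus-miner sectors pledge                 # seal a sector filled with junk data
lotus-miner sectors list                   # find the number of the pledged CC sector
lotus-miner sectors mark-for-upgrade 42    # mark sector 42 to be replaced by a future deal sector
```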
+#### Fast-retrieval deals
+
+Clients can now include a `fast-retrieval` flag when proposing deals with storage miners.
+If set to true, the miner will include an extra copy of the deal data. This
+data can be quickly served in a retrieval deal, since it will not need to be unsealed.
+
+#### Multiple deals per sector
+
+Miners can now pack multiple deals into a single sector, so long as all the deals
+fit into the sector capacity. This should increase the packing efficiency of miners.
+
+### Renamed features
+
+To improve the user experience, we updated several names to maintain
+standard prefixing, and to better reflect the meaning of the features being referenced.
+
+In particular, the Lotus miner binary is now called `lotus-miner`, the default
+path for miner data is now `~/.lotusminer`, and the environment variable
+that sets the path for miner data is now `$LOTUS_MINER_PATH`. A full list of renamed
+features can be found [here](https://github.com/filecoin-project/lotus/issues/2304).
+
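In operational terms, the renaming above boils down to something like the following sketch. The path and variable simply restate the defaults named in the paragraph; `lotus-miner run` is assumed to be the long-running miner command.

```sh
# Binary, data path, and path-override variable after the rename.
# Defaults to ~/.lotusminer when LOTUS_MINER_PATH is unset.
export LOTUS_MINER_PATH=$HOME/.lotusminer
lotus-miner run
```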
+## Changelog
+
+#### Downstream upgrades
+- Upgrades markets to v0.5.6 (https://github.com/filecoin-project/lotus/pull/3058)
+- Upgrades specs-actors to v0.9.3 (https://github.com/filecoin-project/lotus/pull/3151)
+
+#### Core protocol
+- Introduces gas values, replacing placeholders (https://github.com/filecoin-project/lotus/pull/2343)
+- Implements EIP-1559, introducing a network base fee, message gas fee cap, and message gas fee premium (https://github.com/filecoin-project/lotus/pull/2874)
+- Implements Poisson Sortition for elections (https://github.com/filecoin-project/lotus/pull/2084)
+
+#### Deal-making lifecycle
+- Introduces "Committed Capacity" sectors (https://github.com/filecoin-project/lotus/pull/2220)
+- Introduces "fast-retrieval" flag for deals (https://github.com/filecoin-project/lotus/pull/2323)
+- Supports packing multiple deals into one sector (https://github.com/filecoin-project/storage-fsm/pull/38)
+
+#### Enhancements
+
+- Optimized message pool selection logic (https://github.com/filecoin-project/lotus/pull/2838)
+- Window-based scheduling of sealing tasks (https://github.com/filecoin-project/sector-storage/pull/67)
+- Faster window PoSt (https://github.com/filecoin-project/lotus/pull/2209/files)
+- Refactors the payment channel manager (https://github.com/filecoin-project/lotus/pull/2640)
+- Refactors blocksync (https://github.com/filecoin-project/lotus/pull/2715/files)
+
+#### UX
+
+- Provide status updates for data-transfer (https://github.com/filecoin-project/lotus/pull/3162, https://github.com/filecoin-project/lotus/pull/3191)
+- Miners can customise asks (https://github.com/filecoin-project/lotus/pull/2046)
+- Miners can toggle auto-acceptance of deals (https://github.com/filecoin-project/lotus/pull/1994)
+- Miners can maintain a blocklist of piece CIDs (https://github.com/filecoin-project/lotus/pull/2069)
+
+## Contributors
+
+The following contributors had 10 or more commits go into this release.
+We are grateful for every contribution!
+
+| Contributor   | Commits | Lines ±       |
+|---------------|---------|---------------|
+| magik6k       | 361     | +13197/-6136  |
+| Kubuxu        | 227     | +5670/-2587   |
+| arajasek      | 120     | +2916/-1264   |
+| whyrusleeping | 112     | +3979/-1089   |
+| vyzo          | 99      | +3343/-1305   |
+| dirkmc        | 68      | +8732/-3621   |
+| laser         | 45      | +1489/-501    |
+| hannahhoward  | 43      | +2654/-990    |
+| frrist        | 37      | +6630/-4338   |
+| schomatis     | 28      | +3016/-1368   |
+| placer14      | 27      | +824/-350     |
+| raulk         | 25      | +28718/-29849 |
+| mrsmkl        | 22      | +560/-368     |
+| travisperson  | 18      | +1354/-314    |
+| nonsense      | 16      | +2956/-2842   |
+| ingar         | 13      | +331/-123     |
+| daviddias     | 11      | +311/-11      |
+| Stebalien     | 11      | +1204/-980    |
+| RobQuistNL    | 10      | +69/-74       |
+
+# 0.1.0 / 2019-12-11

 We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues).
Makefile: 3 changed lines
@@ -279,5 +279,8 @@ method-gen:

 gen: type-gen method-gen

+docsgen:
+	go run ./api/docgen > documentation/en/api-methods.md
+
 print-%:
 	@echo $*=$($*)
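Paired with the `docs-check` CI job added earlier in this commit, the intended local workflow is roughly the following sketch; both commands appear verbatim elsewhere in this diff.

```sh
make docsgen                   # rewrites documentation/en/api-methods.md via the api/docgen tool
git --no-pager diff --quiet    # docs-check fails CI if regeneration leaves a diff behind
```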
@@ -40,9 +40,6 @@ All work is tracked via issues. An attempt at keeping an up-to-date view on rema
 The lotus Filecoin implementation unfolds into the following packages:

 - [This repo](https://github.com/filecoin-project/lotus)
-- [storage-fsm](https://github.com/filecoin-project/storage-fsm)
-- [sector-storage](https://github.com/filecoin-project/sector-storage)
-- [specs-storage](https://github.com/filecoin-project/specs-storage)
 - [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
 - [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
@@ -109,7 +109,10 @@ type FullNode interface {
	ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)

	// ChainExport returns a stream of bytes with CAR dump of chain data.
-	ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error)
+	// The exported chain data includes the header chain from the given tipset
+	// back to genesis, the entire genesis state, and the most recent 'nroots'
+	// state trees.
+	ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error)

	// MethodGroup: Beacon
	// The Beacon method group contains methods for interacting with the random beacon (DRAND)
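A rough caller-side sketch of the updated `ChainExport` signature follows. The parameter order comes straight from the interface change above; the package name, the already-connected `api.FullNode` handle, and the choice of two state-tree roots are assumptions made purely for illustration.

```go
package example // assumed package name for this sketch

import (
	"context"
	"io"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

// exportChain streams a CAR dump of the chain from the current head into out.
func exportChain(ctx context.Context, node api.FullNode, out io.Writer) error {
	head, err := node.ChainHead(ctx)
	if err != nil {
		return err
	}
	// Header chain back to genesis, plus the two most recent state trees.
	stream, err := node.ChainExport(ctx, abi.ChainEpoch(2), head.Key())
	if err != nil {
		return err
	}
	for chunk := range stream {
		if _, err := out.Write(chunk); err != nil {
			return err
		}
	}
	return nil
}
```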
@@ -184,6 +187,9 @@ type FullNode interface {
	MpoolGetNonce(context.Context, address.Address) (uint64, error)
	MpoolSub(context.Context) (<-chan MpoolUpdate, error)

+	// MpoolClear clears pending messages from the mpool
+	MpoolClear(context.Context, bool) error
+
	// MpoolGetConfig returns (a copy of) the current mpool config
	MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
	// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
@@ -240,6 +246,8 @@ type FullNode interface {
	ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
	// ClientListDeals returns information about the deals made by the local client.
	ClientListDeals(ctx context.Context) ([]DealInfo, error)
+	// ClientGetDealUpdates returns the status of updated deals
+	ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
	// ClientHasLocal indicates whether a certain CID is locally stored.
	ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
	// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
@@ -320,7 +328,7 @@ type FullNode interface {
	StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
	// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
	StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
-	// StateSectorGetInfo returns the on-chain info for the specified miner's sector
+	// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
	// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
	// expiration epoch
	StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
@@ -424,7 +432,7 @@ type FullNode interface {
	PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*paych.SignedVoucher, error)
	PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
	PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
-	PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error)
+	PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
 }

 type FileRef struct {
@@ -128,7 +128,10 @@ type SectorInfo struct {
	Deals        []abi.DealID
	Ticket       SealTicket
	Seed         SealSeed
+	PreCommitMsg *cid.Cid
+	CommitMsg    *cid.Cid
	Retries      uint64
+	ToUpgrade    bool

	LastErr string

@@ -27,11 +27,13 @@ type WorkerAPI interface {

	storage.Sealer

-	MoveStorage(ctx context.Context, sector abi.SectorID) error
+	MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error

	UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
	ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)

+	StorageAddLocal(ctx context.Context, path string) error
+
	Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error

	Closing(context.Context) (<-chan struct{}, error)
@@ -86,7 +86,7 @@ type FullNodeStruct struct {
		ChainGetNode    func(ctx context.Context, p string) (*api.IpldObject, error)                       `perm:"read"`
		ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error)                             `perm:"read"`
		ChainGetPath    func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
-		ChainExport     func(context.Context, types.TipSetKey) (<-chan []byte, error)                      `perm:"read"`
+		ChainExport     func(context.Context, abi.ChainEpoch, types.TipSetKey) (<-chan []byte, error)      `perm:"read"`

		BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`

@@ -107,6 +107,8 @@ type FullNodeStruct struct {
		MpoolSelect func(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) `perm:"read"`

		MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
+		MpoolClear   func(context.Context, bool) error                                      `perm:"write"`
+
		MpoolPush        func(context.Context, *types.SignedMessage) (cid.Cid, error)                                `perm:"write"`
		MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)   `perm:"sign"`
		MpoolGetNonce    func(context.Context, address.Address) (uint64, error)                                      `perm:"read"`
@@ -137,6 +139,7 @@ type FullNodeStruct struct {
		ClientStartDeal          func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error)                                                   `perm:"admin"`
		ClientGetDealInfo        func(context.Context, cid.Cid) (*api.DealInfo, error)                                                                      `perm:"read"`
		ClientListDeals          func(ctx context.Context) ([]api.DealInfo, error)                                                                          `perm:"write"`
+		ClientGetDealUpdates     func(ctx context.Context) (<-chan api.DealInfo, error)                                                                     `perm:"read"`
		ClientRetrieve           func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error                                                `perm:"admin"`
		ClientRetrieveWithEvents func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error)          `perm:"admin"`
		ClientQueryAsk           func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)                       `perm:"read"`
@@ -212,7 +215,7 @@ type FullNodeStruct struct {
		PaychVoucherAdd    func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
		PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*paych.SignedVoucher, error)                    `perm:"sign"`
		PaychVoucherList   func(context.Context, address.Address) ([]*paych.SignedVoucher, error)                                   `perm:"write"`
-		PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error)                            `perm:"sign"`
+		PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)            `perm:"sign"`
	}
 }

@@ -314,7 +317,8 @@ type WorkerStruct struct {
		FinalizeSector  func(context.Context, abi.SectorID, []storage.Range) error                          `perm:"admin"`
		ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error    `perm:"admin"`
		Remove          func(ctx context.Context, sector abi.SectorID) error                                `perm:"admin"`
-		MoveStorage     func(ctx context.Context, sector abi.SectorID) error                                `perm:"admin"`
+		MoveStorage     func(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error   `perm:"admin"`
+		StorageAddLocal func(ctx context.Context, path string) error                                        `perm:"admin"`

		UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"`
		ReadPiece   func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)           `perm:"admin"`
@@ -431,6 +435,10 @@ func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, e
	return c.Internal.ClientListDeals(ctx)
 }

+func (c *FullNodeStruct) ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) {
+	return c.Internal.ClientGetDealUpdates(ctx)
+}
+
 func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error {
	return c.Internal.ClientRetrieve(ctx, order, ref)
 }
@@ -494,6 +502,10 @@ func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey)
	return c.Internal.MpoolPending(ctx, tsk)
 }

+func (c *FullNodeStruct) MpoolClear(ctx context.Context, local bool) error {
+	return c.Internal.MpoolClear(ctx, local)
+}
+
 func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
	return c.Internal.MpoolPush(ctx, smsg)
 }
@@ -642,8 +654,8 @@ func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey,
	return c.Internal.ChainGetPath(ctx, from, to)
 }

-func (c *FullNodeStruct) ChainExport(ctx context.Context, tsk types.TipSetKey) (<-chan []byte, error) {
-	return c.Internal.ChainExport(ctx, tsk)
+func (c *FullNodeStruct) ChainExport(ctx context.Context, nroots abi.ChainEpoch, tsk types.TipSetKey) (<-chan []byte, error) {
+	return c.Internal.ChainExport(ctx, nroots, tsk)
 }

 func (c *FullNodeStruct) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
@@ -914,8 +926,8 @@ func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.A
	return c.Internal.PaychNewPayment(ctx, from, to, vouchers)
 }

-func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (cid.Cid, error) {
-	return c.Internal.PaychVoucherSubmit(ctx, ch, sv)
+func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (cid.Cid, error) {
+	return c.Internal.PaychVoucherSubmit(ctx, ch, sv, secret, proof)
 }

 // StorageMinerStruct
@@ -1208,8 +1220,12 @@ func (w *WorkerStruct) Remove(ctx context.Context, sector abi.SectorID) error {
	return w.Internal.Remove(ctx, sector)
 }

-func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error {
-	return w.Internal.MoveStorage(ctx, sector)
+func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error {
+	return w.Internal.MoveStorage(ctx, sector, types)
+}
+
+func (w *WorkerStruct) StorageAddLocal(ctx context.Context, path string) error {
+	return w.Internal.StorageAddLocal(ctx, path)
 }

 func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
@@ -6,8 +6,8 @@ import (
	"fmt"
	"io"

-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/builtin/paych"
+	abi "github.com/filecoin-project/specs-actors/actors/abi"
+	paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
	cbg "github.com/whyrusleeping/cbor-gen"
	xerrors "golang.org/x/xerrors"
 )
@@ -1,6 +1,7 @@
 package client

 import (
+	"context"
	"net/http"
	"net/url"
	"path"
@@ -14,9 +15,9 @@ import (
 )

 // NewCommonRPC creates a new http jsonrpc client.
-func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
+func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
	var res apistruct.CommonStruct
-	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
		[]interface{}{
			&res.Internal,
		},
@@ -27,9 +28,9 @@ func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.C
 }

 // NewFullNodeRPC creates a new http jsonrpc client.
-func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
+func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
	var res apistruct.FullNodeStruct
-	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
		[]interface{}{
			&res.CommonStruct.Internal,
			&res.Internal,
@@ -39,9 +40,9 @@ func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonr
 }

 // NewStorageMinerRPC creates a new http jsonrpc client for miner
-func NewStorageMinerRPC(addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
+func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
	var res apistruct.StorageMinerStruct
-	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
		[]interface{}{
			&res.CommonStruct.Internal,
			&res.Internal,
@@ -53,7 +54,7 @@ func NewStorageMinerRPC(addr string, requestHeader http.Header, opts ...jsonrpc.
	return &res, closer, err
 }

-func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
+func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
	u, err := url.Parse(addr)
	if err != nil {
		return nil, nil, err
@@ -69,7 +70,7 @@ func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrp
	u.Path = path.Join(u.Path, "../streams/v0/push")

	var res apistruct.WorkerStruct
-	closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
		[]interface{}{
			&res.Internal,
		},
@@ -12,22 +12,29 @@ import (
	"time"
	"unicode"

+	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-filestore"
+	"github.com/libp2p/go-libp2p-core/network"
+	"github.com/libp2p/go-libp2p-core/peer"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/multiformats/go-multiaddr"
+
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-jsonrpc/auth"
+	"github.com/filecoin-project/go-multistore"
+
+	"github.com/filecoin-project/specs-actors/actors/abi"
+	"github.com/filecoin-project/specs-actors/actors/crypto"
+	"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
+
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/apistruct"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
-	"github.com/filecoin-project/specs-actors/actors/abi"
-	"github.com/filecoin-project/specs-actors/actors/crypto"
-	"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
-	"github.com/ipfs/go-cid"
-	"github.com/ipfs/go-filestore"
-	"github.com/libp2p/go-libp2p-core/network"
-	peer "github.com/libp2p/go-libp2p-peer"
-	"github.com/multiformats/go-multiaddr"
 )

 var ExampleValues = map[reflect.Type]interface{}{
@@ -66,11 +73,12 @@ func init() {

	ExampleValues[reflect.TypeOf(addr)] = addr

-	pid, err := peer.IDB58Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
+	pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
	if err != nil {
		panic(err)
	}
	addExample(pid)
+	addExample(&pid)

	addExample(bitfield.NewFromSet([]uint64{5}))
	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
@@ -98,6 +106,12 @@ func init() {
	addExample(build.APIVersion)
	addExample(api.PCHInbound)
	addExample(time.Minute)
+	addExample(datatransfer.TransferID(3))
+	addExample(datatransfer.Ongoing)
+	addExample(multistore.StoreID(50))
+	addExample(retrievalmarket.ClientEventDealAccepted)
+	addExample(retrievalmarket.DealStatusNew)
+	addExample(network.ReachabilityPublic)
	addExample(&types.ExecutionTrace{
		Msg:    exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
		MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
@@ -111,6 +125,14 @@ func init() {
	addExample(map[string]api.MarketBalance{
		"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
	})
+	addExample(map[string]*pubsub.TopicScoreSnapshot{
+		"/blocks": {
+			TimeInMesh:               time.Minute,
+			FirstMessageDeliveries:   122,
+			MeshMessageDeliveries:    1234,
+			InvalidMessageDeliveries: 3,
+		},
+	})

	maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
	if err != nil {
@@ -41,7 +41,7 @@ func (bm *BlockMiner) MineBlocks() {
			nulls := atomic.SwapInt64(&bm.nulls, 0)
			if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
				InjectNulls: abi.ChainEpoch(nulls),
-				Done:        func(bool, error) {},
+				Done:        func(bool, abi.ChainEpoch, error) {},
			}); err != nil {
				bm.t.Error(err)
			}
@@ -24,6 +24,7 @@ import (
	"github.com/filecoin-project/lotus/build"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/miner"
+	"github.com/filecoin-project/specs-actors/actors/abi"
	dag "github.com/ipfs/go-merkledag"
	dstest "github.com/ipfs/go-merkledag/test"
	unixfile "github.com/ipfs/go-unixfs/file"
@@ -35,7 +36,7 @@ import (

 var MineNext = miner.MineReq{
	InjectNulls: 0,
-	Done:        func(bool, error) {},
+	Done:        func(bool, abi.ChainEpoch, error) {},
 }

 func init() {
@@ -141,7 +142,7 @@ func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNod
	info, err := client.ClientGetDealInfo(ctx, *deal)
	require.NoError(t, err)

-	testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, carExport, data)
+	testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
 }

 func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
@@ -193,7 +194,7 @@ func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Durati
	info, err := client.ClientGetDealInfo(ctx, *deal)
	require.NoError(t, err)

-	testRetrieval(t, ctx, err, client, fcid, &info.PieceCID, false, data)
+	testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
	atomic.AddInt64(&mine, -1)
	fmt.Println("shutting down mining")
	<-done
@@ -267,7 +268,7 @@ func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
		rf, _ := miner.SectorsRefs(ctx)
		fmt.Printf("refs: %+v\n", rf)

-		testRetrieval(t, ctx, err, client, fcid2, &info.PieceCID, false, data2)
+		testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
	}

	atomic.AddInt64(&mine, -1)
@@ -373,7 +374,7 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
	}
 }

-func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
+func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
	offers, err := client.ClientFindData(ctx, fcid, piece)
	if err != nil {
		t.Fatal(err)
@@ -20,6 +20,7 @@ import (
	"github.com/filecoin-project/lotus/node/impl"
 )

+//nolint:deadcode,varcheck
 var log = logging.Logger("apitest")

 func (ts *testSuite) testMining(t *testing.T) {
@@ -30,22 +31,20 @@ func (ts *testSuite) testMining(t *testing.T) {
	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	initHead := (<-newHeads)[0]
-	if initHead.Val.Height() != 2 {
-		<-newHeads
-	}
+	baseHeight := initHead.Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
-	require.Equal(t, abi.ChainEpoch(2), h1.Height())
+	require.Equal(t, int64(h1.Height()), int64(baseHeight))

-	err = sn[0].MineOne(ctx, MineNext)
+	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
-	require.Equal(t, abi.ChainEpoch(3), h2.Height())
+	require.Greater(t, int64(h2.Height()), int64(h1.Height()))
 }

 func (ts *testSuite) testMiningReal(t *testing.T) {
@@ -69,7 +68,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, abi.ChainEpoch(2), h1.Height())

-	err = sn[0].MineOne(ctx, MineNext)
+	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads
@@ -78,7 +77,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, abi.ChainEpoch(3), h2.Height())

-	err = sn[0].MineOne(ctx, MineNext)
+	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads
@@ -143,7 +142,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
		complChan := minedTwo
		for atomic.LoadInt32(&mine) != 0 {
			wait := make(chan int)
-			mdone := func(mined bool, err error) {
+			mdone := func(mined bool, _ abi.ChainEpoch, err error) {
				n := 0
				if mined {
					n = 1
@ -153,6 +153,9 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
|||||||
}, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
|
}, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
|
||||||
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
|
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-finished:
|
case <-finished:
|
||||||
|
@ -3,11 +3,13 @@ package test
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/miner"
|
||||||
)
|
)
|
||||||
|
|
||||||
func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
|
func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
|
||||||
@ -34,3 +36,51 @@ func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.
|
|||||||
t.Fatal("did not successfully send money")
|
t.Fatal("did not successfully send money")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
|
||||||
|
for i := 0; i < 1000; i++ {
|
||||||
|
var success bool
|
||||||
|
var err error
|
||||||
|
var epoch abi.ChainEpoch
|
||||||
|
wait := make(chan struct{})
|
||||||
|
mineErr := sn.MineOne(ctx, miner.MineReq{
|
||||||
|
Done: func(win bool, ep abi.ChainEpoch, e error) {
|
||||||
|
success = win
|
||||||
|
err = e
|
||||||
|
epoch = ep
|
||||||
|
wait <- struct{}{}
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if mineErr != nil {
|
||||||
|
t.Fatal(mineErr)
|
||||||
|
}
|
||||||
|
<-wait
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if success {
|
||||||
|
// Wait until it shows up on the given full node's ChainHead
|
||||||
|
nloops := 50
|
||||||
|
for i := 0; i < nloops; i++ {
|
||||||
|
ts, err := fn.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if ts.Height() == epoch {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if i == nloops-1 {
|
||||||
|
t.Fatal("block never managed to sync to node")
|
||||||
|
}
|
||||||
|
time.Sleep(time.Millisecond * 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cb != nil {
|
||||||
|
cb(epoch)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
t.Log("did not mine block, trying again", i)
|
||||||
|
}
|
||||||
|
t.Fatal("failed to mine 1000 times in a row...")
|
||||||
|
}
|
||||||
|
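The new MineUntilBlock helper above mines with retries and blocks until the full node has observed the new head. A usage sketch, not part of the commit: ctx, t, apis and sn are assumed to come from the surrounding apitest setup, as in the MineUntilBlock(ctx, t, apis[0], sn[0], nil) calls elsewhere in this diff.

func exampleMineOneBlock(ctx context.Context, t *testing.T, apis []TestNode, sn []TestStorageNode) abi.ChainEpoch {
	var minedAt abi.ChainEpoch
	// The callback is optional (the other call sites above pass nil); here it
	// records the epoch the winning block landed at.
	MineUntilBlock(ctx, t, apis[0], sn[0], func(e abi.ChainEpoch) {
		minedAt = e
	})
	return minedAt
}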
@ -24,9 +24,14 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/node/impl"
|
"github.com/filecoin-project/lotus/node/impl"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
func init() {
|
||||||
os.Setenv("BELLMAN_NO_GPU", "1")
|
err := os.Setenv("BELLMAN_NO_GPU", "1")
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
n, sn := b(t, 1, OneMiner)
|
n, sn := b(t, 1, OneMiner)
|
||||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||||
@ -48,7 +53,7 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect
|
|||||||
defer close(done)
|
defer close(done)
|
||||||
for mine {
|
for mine {
|
||||||
build.Clock.Sleep(blocktime)
|
build.Clock.Sleep(blocktime)
|
||||||
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, error) {
|
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
|
||||||
|
|
||||||
}}); err != nil {
|
}}); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
@ -110,8 +115,6 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||||
os.Setenv("BELLMAN_NO_GPU", "1")
|
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
n, sn := b(t, 1, OneMiner)
|
n, sn := b(t, 1, OneMiner)
|
||||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||||
@ -213,6 +216,10 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
|
|||||||
sn, err := parts[0].Sectors.First()
|
sn, err := parts[0].Sectors.First()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
all, err := parts[0].Sectors.All(2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
fmt.Println("the sectors", all)
|
||||||
|
|
||||||
s = abi.SectorID{
|
s = abi.SectorID{
|
||||||
Miner: abi.ActorID(mid),
|
Miner: abi.ActorID(mid),
|
||||||
Number: abi.SectorNumber(sn),
|
Number: abi.SectorNumber(sn),
|
||||||
|
@ -1,12 +1,6 @@
|
|||||||
/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
|
/dns4/bootstrap-0.testnet.fildev.network/tcp/1347/p2p/12D3KooWJTUBUjtzWJGWU1XSiY21CwmHaCNLNYn2E7jqHEHyZaP7
|
||||||
/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd
|
/dns4/bootstrap-1.testnet.fildev.network/tcp/1347/p2p/12D3KooW9yeKXha4hdrJKq74zEo99T8DhriQdWNoojWnnQbsgB3v
|
||||||
/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
|
/dns4/bootstrap-2.testnet.fildev.network/tcp/1347/p2p/12D3KooWCrx8yVG9U9Kf7w8KLN3Edkj5ZKDhgCaeMqQbcQUoB6CT
|
||||||
/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d
|
/dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34
|
||||||
/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
|
/dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T
|
||||||
/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK
|
/dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W
|
||||||
/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
|
|
||||||
/ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw
|
|
||||||
/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
|
|
||||||
/ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5
|
|
||||||
/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
|
|
||||||
/ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R
|
|
||||||
|
@ -2,7 +2,7 @@ package build
|
|||||||
|
|
||||||
import "github.com/filecoin-project/lotus/node/modules/dtypes"
|
import "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
|
||||||
var DrandNetwork = DrandMainnet
|
var DrandNetwork = DrandIncentinet
|
||||||
|
|
||||||
func DrandConfig() dtypes.DrandConfig {
|
func DrandConfig() dtypes.DrandConfig {
|
||||||
return DrandConfigs[DrandNetwork]
|
return DrandConfigs[DrandNetwork]
|
||||||
@ -15,6 +15,7 @@ const (
|
|||||||
DrandTestnet
|
DrandTestnet
|
||||||
DrandDevnet
|
DrandDevnet
|
||||||
DrandLocalnet
|
DrandLocalnet
|
||||||
|
DrandIncentinet
|
||||||
)
|
)
|
||||||
|
|
||||||
var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
|
var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
|
||||||
@ -55,4 +56,17 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
|
|||||||
},
|
},
|
||||||
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
|
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
|
||||||
},
|
},
|
||||||
|
DrandIncentinet: {
|
||||||
|
Servers: []string{
|
||||||
|
"https://pl-eu.incentinet.drand.sh",
|
||||||
|
"https://pl-us.incentinet.drand.sh",
|
||||||
|
"https://pl-sin.incentinet.drand.sh",
|
||||||
|
},
|
||||||
|
Relays: []string{
|
||||||
|
"/dnsaddr/pl-eu.incentinet.drand.sh/",
|
||||||
|
"/dnsaddr/pl-us.incentinet.drand.sh/",
|
||||||
|
"/dnsaddr/pl-sin.incentinet.drand.sh/",
|
||||||
|
},
|
||||||
|
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
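A minimal sketch of what the hunk above changes for callers: with DrandNetwork now set to DrandIncentinet, build.DrandConfig() resolves to the incentinet entry added to DrandConfigs. The program below assumes only the exported build identifiers shown in this hunk.

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/build"
)

func main() {
	cfg := build.DrandConfig() // returns DrandConfigs[DrandIncentinet]
	fmt.Println(cfg.Servers)   // the pl-eu/pl-us/pl-sin incentinet endpoints listed above
}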
Binary file not shown.
@ -13,7 +13,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
power.ConsensusMinerMinPower = big.NewInt(1024 << 30)
|
power.ConsensusMinerMinPower = big.NewInt(10 << 40)
|
||||||
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
||||||
abi.RegisteredSealProof_StackedDrg32GiBV1: {},
|
abi.RegisteredSealProof_StackedDrg32GiBV1: {},
|
||||||
abi.RegisteredSealProof_StackedDrg64GiBV1: {},
|
abi.RegisteredSealProof_StackedDrg64GiBV1: {},
|
||||||
|
@ -25,7 +25,7 @@ func buildType() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BuildVersion is the local build version, set by build system
|
// BuildVersion is the local build version, set by build system
|
||||||
const BuildVersion = "0.4.6"
|
const BuildVersion = "0.5.7"
|
||||||
|
|
||||||
func UserVersion() string {
|
func UserVersion() string {
|
||||||
return BuildVersion + buildType() + CurrentCommit
|
return BuildVersion + buildType() + CurrentCommit
|
||||||
@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// APIVersion is a semver version of the rpc api exposed
|
// APIVersion is a semver version of the rpc api exposed
|
||||||
var APIVersion Version = newVer(0, 11, 0)
|
var APIVersion Version = newVer(0, 14, 0)
|
||||||
|
|
||||||
//nolint:varcheck,deadcode
|
//nolint:varcheck,deadcode
|
||||||
const (
|
const (
|
||||||
|
@ -37,6 +37,10 @@ func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.B
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(h.BeaconEntries) == 0 {
|
||||||
|
return xerrors.Errorf("expected to have beacon entries in this block, but didn't find any")
|
||||||
|
}
|
||||||
|
|
||||||
last := h.BeaconEntries[len(h.BeaconEntries)-1]
|
last := h.BeaconEntries[len(h.BeaconEntries)-1]
|
||||||
if last.Round != maxRound {
|
if last.Round != maxRound {
|
||||||
return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round)
|
return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round)
|
||||||
|
@ -7,8 +7,8 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
peer "github.com/libp2p/go-libp2p-core/peer"
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
)
|
)
|
||||||
|
|
||||||
type blockReceiptTracker struct {
|
type blockReceiptTracker struct {
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
types "github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/ipfs/go-cid"
|
cid "github.com/ipfs/go-cid"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
xerrors "golang.org/x/xerrors"
|
xerrors "golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
@ -172,7 +172,7 @@ func (client *BlockSync) processResponse(
|
|||||||
resLength, req.Length)
|
resLength, req.Length)
|
||||||
}
|
}
|
||||||
if resLength < int(req.Length) && res.Status != Partial {
|
if resLength < int(req.Length) && res.Status != Partial {
|
||||||
return nil, xerrors.Errorf("got less than requested without a proper status: %s", res.Status)
|
return nil, xerrors.Errorf("got less than requested without a proper status: %d", res.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
validRes := &validatedResponse{}
|
validRes := &validatedResponse{}
|
||||||
@ -205,7 +205,7 @@ func (client *BlockSync) processResponse(
|
|||||||
validRes.messages = make([]*CompactedMessages, resLength)
|
validRes.messages = make([]*CompactedMessages, resLength)
|
||||||
for i := 0; i < resLength; i++ {
|
for i := 0; i < resLength; i++ {
|
||||||
if res.Chain[i].Messages == nil {
|
if res.Chain[i].Messages == nil {
|
||||||
return nil, xerrors.Errorf("no messages included for tipset at height (head - %d): %w", i)
|
return nil, xerrors.Errorf("no messages included for tipset at height (head - %d)", i)
|
||||||
}
|
}
|
||||||
validRes.messages[i] = res.Chain[i].Messages
|
validRes.messages[i] = res.Chain[i].Messages
|
||||||
}
|
}
|
||||||
@ -308,6 +308,12 @@ func (client *BlockSync) GetChainMessages(
|
|||||||
length uint64,
|
length uint64,
|
||||||
) ([]*CompactedMessages, error) {
|
) ([]*CompactedMessages, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "GetChainMessages")
|
ctx, span := trace.StartSpan(ctx, "GetChainMessages")
|
||||||
|
if span.IsRecordingEvents() {
|
||||||
|
span.AddAttributes(
|
||||||
|
trace.StringAttribute("tipset", fmt.Sprint(head.Cids())),
|
||||||
|
trace.Int64Attribute("count", int64(length)),
|
||||||
|
)
|
||||||
|
}
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
req := &Request{
|
req := &Request{
|
||||||
|
@ -1,9 +1,10 @@
|
|||||||
package blocksync
|
package blocksync
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
logging "github.com/ipfs/go-log"
|
logging "github.com/ipfs/go-log"
|
||||||
|
@ -221,37 +221,36 @@ func collectChainSegment(
|
|||||||
func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) {
|
func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) {
|
||||||
blsmsgmap := make(map[cid.Cid]uint64)
|
blsmsgmap := make(map[cid.Cid]uint64)
|
||||||
secpkmsgmap := make(map[cid.Cid]uint64)
|
secpkmsgmap := make(map[cid.Cid]uint64)
|
||||||
var secpkmsgs []*types.SignedMessage
|
|
||||||
var blsmsgs []*types.Message
|
|
||||||
var secpkincl, blsincl [][]uint64
|
var secpkincl, blsincl [][]uint64
|
||||||
|
|
||||||
|
var blscids, secpkcids []cid.Cid
|
||||||
for _, block := range ts.Blocks() {
|
for _, block := range ts.Blocks() {
|
||||||
bmsgs, smsgs, err := cs.MessagesForBlock(block)
|
bc, sc, err := cs.ReadMsgMetaCids(block.Messages)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, err
|
return nil, nil, nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: DRY. Use `chain.Message` interface.
|
// FIXME: DRY. Use `chain.Message` interface.
|
||||||
bmi := make([]uint64, 0, len(bmsgs))
|
bmi := make([]uint64, 0, len(bc))
|
||||||
for _, m := range bmsgs {
|
for _, m := range bc {
|
||||||
i, ok := blsmsgmap[m.Cid()]
|
i, ok := blsmsgmap[m]
|
||||||
if !ok {
|
if !ok {
|
||||||
i = uint64(len(blsmsgs))
|
i = uint64(len(blscids))
|
||||||
blsmsgs = append(blsmsgs, m)
|
blscids = append(blscids, m)
|
||||||
blsmsgmap[m.Cid()] = i
|
blsmsgmap[m] = i
|
||||||
}
|
}
|
||||||
|
|
||||||
bmi = append(bmi, i)
|
bmi = append(bmi, i)
|
||||||
}
|
}
|
||||||
blsincl = append(blsincl, bmi)
|
blsincl = append(blsincl, bmi)
|
||||||
|
|
||||||
smi := make([]uint64, 0, len(smsgs))
|
smi := make([]uint64, 0, len(sc))
|
||||||
for _, m := range smsgs {
|
for _, m := range sc {
|
||||||
i, ok := secpkmsgmap[m.Cid()]
|
i, ok := secpkmsgmap[m]
|
||||||
if !ok {
|
if !ok {
|
||||||
i = uint64(len(secpkmsgs))
|
i = uint64(len(secpkcids))
|
||||||
secpkmsgs = append(secpkmsgs, m)
|
secpkcids = append(secpkcids, m)
|
||||||
secpkmsgmap[m.Cid()] = i
|
secpkmsgmap[m] = i
|
||||||
}
|
}
|
||||||
|
|
||||||
smi = append(smi, i)
|
smi = append(smi, i)
|
||||||
@ -259,5 +258,15 @@ func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [
|
|||||||
secpkincl = append(secpkincl, smi)
|
secpkincl = append(secpkincl, smi)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
blsmsgs, err := cs.LoadMessagesFromCids(blscids)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return blsmsgs, blsincl, secpkmsgs, secpkincl, nil
|
return blsmsgs, blsincl, secpkmsgs, secpkincl, nil
|
||||||
}
|
}
|
||||||
|
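The gatherMessages change above stops loading full messages per block and instead indexes unique message CIDs first (ReadMsgMetaCids), resolving them once at the end with LoadMessagesFromCids and LoadSignedMessagesFromCids. A standalone sketch of that index-then-batch-load pattern, with strings standing in for cid.Cid and the batched load elided:

package main

import "fmt"

func main() {
	blocks := [][]string{{"a", "b"}, {"b", "c"}} // per-block message CIDs, duplicated across blocks

	index := map[string]uint64{} // mirrors blsmsgmap/secpkmsgmap
	var order []string           // unique CIDs in first-seen order, mirrors blscids/secpkcids
	var incl [][]uint64          // per-block index lists, mirrors blsincl/secpkincl

	for _, blk := range blocks {
		ref := make([]uint64, 0, len(blk))
		for _, c := range blk {
			i, ok := index[c]
			if !ok {
				i = uint64(len(order))
				order = append(order, c)
				index[c] = i
			}
			ref = append(ref, i)
		}
		incl = append(incl, ref)
	}

	// a single batched load (cs.LoadMessagesFromCids in the real code) would happen here
	fmt.Println(order, incl) // [a b c] [[0 1] [1 2]]
}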
@ -2,6 +2,7 @@ package state
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||||
typegen "github.com/whyrusleeping/cbor-gen"
|
typegen "github.com/whyrusleeping/cbor-gen"
|
||||||
)
|
)
|
||||||
|
@ -3,6 +3,7 @@ package state
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||||
|
@ -114,10 +114,10 @@ func TestMarketPredicates(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
oldBalances := map[address.Address]balance{
|
oldBalances := map[address.Address]balance{
|
||||||
tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)},
|
tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(1000), abi.NewTokenAmount(1000)},
|
||||||
tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
|
tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
|
||||||
tutils.NewIDAddr(t, 3): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)},
|
tutils.NewIDAddr(t, 3): {abi.NewTokenAmount(3000), abi.NewTokenAmount(2000)},
|
||||||
tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)},
|
tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(3000), abi.NewTokenAmount(1000)},
|
||||||
}
|
}
|
||||||
|
|
||||||
oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances)
|
oldStateC := createMarketState(ctx, t, store, oldDeals, oldProps, oldBalances)
|
||||||
@ -162,10 +162,10 @@ func TestMarketPredicates(t *testing.T) {
|
|||||||
// NB: DealProposals cannot be modified, so don't test that case.
|
// NB: DealProposals cannot be modified, so don't test that case.
|
||||||
}
|
}
|
||||||
newBalances := map[address.Address]balance{
|
newBalances := map[address.Address]balance{
|
||||||
tutils.NewIDAddr(t, 1): balance{abi.NewTokenAmount(3000), abi.NewTokenAmount(0)},
|
tutils.NewIDAddr(t, 1): {abi.NewTokenAmount(3000), abi.NewTokenAmount(0)},
|
||||||
tutils.NewIDAddr(t, 2): balance{abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
|
tutils.NewIDAddr(t, 2): {abi.NewTokenAmount(2000), abi.NewTokenAmount(500)},
|
||||||
tutils.NewIDAddr(t, 4): balance{abi.NewTokenAmount(5000), abi.NewTokenAmount(0)},
|
tutils.NewIDAddr(t, 4): {abi.NewTokenAmount(5000), abi.NewTokenAmount(0)},
|
||||||
tutils.NewIDAddr(t, 5): balance{abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)},
|
tutils.NewIDAddr(t, 5): {abi.NewTokenAmount(1000), abi.NewTokenAmount(3000)},
|
||||||
}
|
}
|
||||||
|
|
||||||
newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances)
|
newStateC := createMarketState(ctx, t, store, newDeals, newProps, newBalances)
|
||||||
@ -505,6 +505,7 @@ func createBalanceTable(ctx context.Context, t *testing.T, store adt.Store, bala
|
|||||||
lockedMapRootCid, err := lockedMapRoot.Root()
|
lockedMapRootCid, err := lockedMapRoot.Root()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid)
|
lockedRoot, err := adt.AsBalanceTable(store, lockedMapRootCid)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
for addr, balance := range balances {
|
for addr, balance := range balances {
|
||||||
err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked))
|
err := escrowRoot.Add(addr, big.Add(balance.available, balance.locked))
|
||||||
@ -542,6 +543,7 @@ func createEmptyMinerState(ctx context.Context, t *testing.T, store adt.Store, o
|
|||||||
|
|
||||||
emptyVestingFunds := miner.ConstructVestingFunds()
|
emptyVestingFunds := miner.ConstructVestingFunds()
|
||||||
emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds)
|
emptyVestingFundsCid, err := store.Put(store.Context(), emptyVestingFunds)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
|
emptyDeadlines := miner.ConstructDeadlines(emptyDeadline)
|
||||||
emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines)
|
emptyDeadlinesCid, err := store.Put(store.Context(), emptyDeadlines)
|
||||||
|
@ -41,10 +41,11 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/node/repo"
|
"github.com/filecoin-project/lotus/node/repo"
|
||||||
)
|
)
|
||||||
|
|
||||||
var log = logging.Logger("gen")
|
|
||||||
|
|
||||||
const msgsPerBlock = 20
|
const msgsPerBlock = 20
|
||||||
|
|
||||||
|
//nolint:deadcode,varcheck
|
||||||
|
var log = logging.Logger("gen")
|
||||||
|
|
||||||
var ValidWpostForTesting = []abi.PoStProof{{
|
var ValidWpostForTesting = []abi.PoStProof{{
|
||||||
ProofBytes: []byte("valid proof"),
|
ProofBytes: []byte("valid proof"),
|
||||||
}}
|
}}
|
||||||
@ -605,7 +606,7 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
|
|||||||
|
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
if err := miner.MarshalCBOR(buf); err != nil {
|
if err := miner.MarshalCBOR(buf); err != nil {
|
||||||
return nil, xerrors.Errorf("failed to cbor marshal address: %w")
|
return nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
|
electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
|
||||||
|
@ -2,6 +2,7 @@ package genesis
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
@ -118,11 +119,6 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, xerrors.Errorf("making new state tree: %w", err)
|
return nil, nil, xerrors.Errorf("making new state tree: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
emptyobject, err := cst.Put(context.TODO(), []struct{}{})
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, xerrors.Errorf("failed putting empty object: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create system actor
|
// Create system actor
|
||||||
|
|
||||||
sysact, err := SetupSystemActor(bs)
|
sysact, err := SetupSystemActor(bs)
|
||||||
@ -191,11 +187,18 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, xerrors.Errorf("set market actor: %w", err)
|
return nil, nil, xerrors.Errorf("set market actor: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
burntRoot, err := cst.Put(ctx, &account.State{
|
||||||
|
Address: builtin.BurntFundsActorAddr,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, xerrors.Errorf("failed to setup burnt funds actor state: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Setup burnt-funds
|
// Setup burnt-funds
|
||||||
err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{
|
err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{
|
||||||
Code: builtin.AccountActorCodeID,
|
Code: builtin.AccountActorCodeID,
|
||||||
Balance: types.NewInt(0),
|
Balance: types.NewInt(0),
|
||||||
Head: emptyobject,
|
Head: burntRoot,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err)
|
return nil, nil, xerrors.Errorf("set burnt funds account actor: %w", err)
|
||||||
@ -297,6 +300,8 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template.RemainderAccount.Balance = remainingFil
|
||||||
|
|
||||||
if err := createMultisigAccount(ctx, bs, cst, state, remAccKey, template.RemainderAccount, keyIDs); err != nil {
|
if err := createMultisigAccount(ctx, bs, cst, state, remAccKey, template.RemainderAccount, keyIDs); err != nil {
|
||||||
return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err)
|
return nil, nil, xerrors.Errorf("failed to set up remainder account: %w", err)
|
||||||
}
|
}
|
||||||
@ -508,8 +513,10 @@ func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys vm.SyscallB
|
|||||||
|
|
||||||
log.Infof("Empty Genesis root: %s", emptyroot)
|
log.Infof("Empty Genesis root: %s", emptyroot)
|
||||||
|
|
||||||
|
tickBuf := make([]byte, 32)
|
||||||
|
_, _ = rand.Read(tickBuf)
|
||||||
genesisticket := &types.Ticket{
|
genesisticket := &types.Ticket{
|
||||||
VRFProof: []byte("vrf proof0000000vrf proof0000000"),
|
VRFProof: tickBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
filecoinGenesisCid, err := cid.Decode("bafyreiaqpwbbyjo4a42saasj36kkrpv4tsherf2e7bvezkert2a7dhonoi")
|
filecoinGenesisCid, err := cid.Decode("bafyreiaqpwbbyjo4a42saasj36kkrpv4tsherf2e7bvezkert2a7dhonoi")
|
||||||
|
@ -129,6 +129,9 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add market funds
|
// Add market funds
|
||||||
@ -217,9 +220,12 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error {
|
err = vm.MutateState(ctx, builtin.RewardActorAddr, func(sct cbor.IpldStore, st *reward.State) error {
|
||||||
st = reward.ConstructState(qaPow)
|
*st = *reward.ConstructState(qaPow)
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("mutating state: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, m := range miners {
|
for i, m := range miners {
|
||||||
@ -244,7 +250,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
|
|||||||
|
|
||||||
// we've added fake power for this sector above, remove it now
|
// we've added fake power for this sector above, remove it now
|
||||||
err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
|
err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error {
|
||||||
st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight)
|
st.TotalQualityAdjPower = types.BigSub(st.TotalQualityAdjPower, sectorWeight) //nolint:scopelint
|
||||||
st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
|
st.TotalRawBytePower = types.BigSub(st.TotalRawBytePower, types.NewInt(uint64(m.SectorSize)))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
@ -324,13 +330,13 @@ type fakeRand struct{}
|
|||||||
|
|
||||||
func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||||
out := make([]byte, 32)
|
out := make([]byte, 32)
|
||||||
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out)
|
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||||
out := make([]byte, 32)
|
out := make([]byte, 32)
|
||||||
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out)
|
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -21,10 +21,6 @@ func mustEnc(i cbg.CBORMarshaler) []byte {
|
|||||||
return enc
|
return enc
|
||||||
}
|
}
|
||||||
|
|
||||||
func doExec(ctx context.Context, vm *vm.VM, to, from address.Address, method abi.MethodNum, params []byte) ([]byte, error) {
|
|
||||||
return doExecValue(ctx, vm, to, from, types.NewInt(0), method, params)
|
|
||||||
}
|
|
||||||
|
|
||||||
func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
|
func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
|
||||||
act, err := vm.StateTree().GetActor(from)
|
act, err := vm.StateTree().GetActor(from)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -147,6 +147,7 @@ func TestAddFunds(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for testCase, data := range testCases {
|
for testCase, data := range testCases {
|
||||||
|
//nolint:scopelint
|
||||||
t.Run(testCase, func(t *testing.T) {
|
t.Run(testCase, func(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
@ -56,7 +56,7 @@ func binomialCoefficient(n, k float64) float64 {
|
|||||||
for d := 1.0; d <= k; d++ {
|
for d := 1.0; d <= k; d++ {
|
||||||
r *= n
|
r *= n
|
||||||
r /= d
|
r /= d
|
||||||
n -= 1
|
n--
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
|
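A worked check of the multiplicative binomial form the loop above computes, C(n,k) = prod over d=1..k of (n-d+1)/d; the initialization r := 1.0 is assumed, since the hunk only shows the loop body.

package main

import "fmt"

func binomialCoefficient(n, k float64) float64 {
	r := 1.0
	for d := 1.0; d <= k; d++ {
		r *= n
		r /= d
		n-- // formerly n -= 1
	}
	return r
}

func main() {
	fmt.Println(binomialCoefficient(5, 2)) // (5*4)/(1*2) = 10
}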
@ -6,12 +6,14 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
|
stdbig "math/big"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
"github.com/filecoin-project/specs-actors/actors/crypto"
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
@ -26,6 +28,7 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
"github.com/filecoin-project/lotus/lib/sigs"
|
"github.com/filecoin-project/lotus/lib/sigs"
|
||||||
@ -36,7 +39,7 @@ import (
|
|||||||
|
|
||||||
var log = logging.Logger("messagepool")
|
var log = logging.Logger("messagepool")
|
||||||
|
|
||||||
const futureDebug = false
|
var futureDebug = false
|
||||||
|
|
||||||
var rbfNumBig = types.NewInt(uint64((ReplaceByFeeRatioDefault - 1) * RbfDenom))
|
var rbfNumBig = types.NewInt(uint64((ReplaceByFeeRatioDefault - 1) * RbfDenom))
|
||||||
var rbfDenomBig = types.NewInt(RbfDenom)
|
var rbfDenomBig = types.NewInt(RbfDenom)
|
||||||
@ -45,6 +48,10 @@ const RbfDenom = 256
|
|||||||
|
|
||||||
var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second
|
var RepublishInterval = pubsub.TimeCacheDuration + time.Duration(5*build.BlockDelaySecs+build.PropagationDelaySecs)*time.Second
|
||||||
|
|
||||||
|
var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
|
||||||
|
|
||||||
|
var MaxActorPendingMessages = 1000
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrMessageTooBig = errors.New("message too big")
|
ErrMessageTooBig = errors.New("message too big")
|
||||||
|
|
||||||
@ -52,12 +59,15 @@ var (
|
|||||||
|
|
||||||
ErrNonceTooLow = errors.New("message nonce too low")
|
ErrNonceTooLow = errors.New("message nonce too low")
|
||||||
|
|
||||||
|
ErrGasFeeCapTooLow = errors.New("gas fee cap too low")
|
||||||
|
|
||||||
ErrNotEnoughFunds = errors.New("not enough funds to execute transaction")
|
ErrNotEnoughFunds = errors.New("not enough funds to execute transaction")
|
||||||
|
|
||||||
ErrInvalidToAddr = errors.New("message had invalid to address")
|
ErrInvalidToAddr = errors.New("message had invalid to address")
|
||||||
|
|
||||||
ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail")
|
ErrSoftValidationFailure = errors.New("validation failure")
|
||||||
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
||||||
|
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
|
||||||
|
|
||||||
ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
|
ErrTryAgain = errors.New("state inconsistency while pushing message; please try again")
|
||||||
)
|
)
|
||||||
@ -118,15 +128,17 @@ type MessagePool struct {
|
|||||||
type msgSet struct {
|
type msgSet struct {
|
||||||
msgs map[uint64]*types.SignedMessage
|
msgs map[uint64]*types.SignedMessage
|
||||||
nextNonce uint64
|
nextNonce uint64
|
||||||
|
requiredFunds *stdbig.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
func newMsgSet() *msgSet {
|
func newMsgSet() *msgSet {
|
||||||
return &msgSet{
|
return &msgSet{
|
||||||
msgs: make(map[uint64]*types.SignedMessage),
|
msgs: make(map[uint64]*types.SignedMessage),
|
||||||
|
requiredFunds: stdbig.NewInt(0),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
|
func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, limit bool) (bool, error) {
|
||||||
if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce {
|
if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce {
|
||||||
ms.nextNonce = m.Message.Nonce + 1
|
ms.nextNonce = m.Message.Nonce + 1
|
||||||
}
|
}
|
||||||
@ -148,22 +160,52 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
|
|||||||
ErrRBFTooLowPremium)
|
ErrRBFTooLowPremium)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
|
||||||
|
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !has && limit && len(ms.msgs) > MaxActorPendingMessages {
|
||||||
|
log.Errorf("too many pending messages from actor %s", m.Message.From)
|
||||||
|
return false, ErrTooManyPendingMessages
|
||||||
|
}
|
||||||
|
|
||||||
ms.msgs[m.Message.Nonce] = m
|
ms.msgs[m.Message.Nonce] = m
|
||||||
|
ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
||||||
|
//ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int)
|
||||||
|
|
||||||
return !has, nil
|
return !has, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ms *msgSet) rm(nonce uint64) {
|
||||||
|
m, has := ms.msgs[nonce]
|
||||||
|
if has {
|
||||||
|
ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int)
|
||||||
|
//ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int)
|
||||||
|
delete(ms.msgs, nonce)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt {
|
||||||
|
requiredFunds := new(stdbig.Int).Set(ms.requiredFunds)
|
||||||
|
|
||||||
|
m, has := ms.msgs[nonce]
|
||||||
|
if has {
|
||||||
|
requiredFunds.Sub(requiredFunds, m.Message.RequiredFunds().Int)
|
||||||
|
//requiredFunds.Sub(requiredFunds, m.Message.Value.Int)
|
||||||
|
}
|
||||||
|
|
||||||
|
return types.BigInt{Int: requiredFunds}
|
||||||
|
}
|
||||||
|
|
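A self-contained sketch of the bookkeeping the new requiredFunds field maintains: add() accumulates each pending message's RequiredFunds(), rm() subtracts it, and getRequiredFunds(nonce) reports the total minus the message a replacement at that nonce would evict. Amounts are made up and plain math/big values stand in for types.BigInt.

package main

import (
	"fmt"
	stdbig "math/big"
)

func main() {
	required := stdbig.NewInt(0)
	pending := map[uint64]*stdbig.Int{ // nonce -> RequiredFunds() of the pending message
		5: stdbig.NewInt(10),
		6: stdbig.NewInt(20),
		7: stdbig.NewInt(30),
	}
	for _, f := range pending {
		required.Add(required, f) // what add() does for each accepted message
	}
	// what getRequiredFunds(6) computes: the total minus the message replaced at nonce 6
	atNonce := new(stdbig.Int).Set(required)
	if f, ok := pending[6]; ok {
		atNonce.Sub(atNonce, f)
	}
	fmt.Println(required, atNonce) // 60 40
}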
||||||
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) {
|
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) {
|
||||||
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
|
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
|
||||||
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
|
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
|
||||||
|
|
||||||
cfg, err := loadConfig(ds)
|
cfg, err := loadConfig(ds)
|
||||||
if err != nil {
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("error loading mpool config: %w", err)
|
return nil, xerrors.Errorf("error loading mpool config: %w", err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
mp := &MessagePool{
|
mp := &MessagePool{
|
||||||
ds: ds,
|
ds: ds,
|
||||||
@ -188,12 +230,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
|
|||||||
// enable initial prunes
|
// enable initial prunes
|
||||||
mp.pruneCooldown <- struct{}{}
|
mp.pruneCooldown <- struct{}{}
|
||||||
|
|
||||||
if err := mp.loadLocal(); err != nil {
|
// load the current tipset and subscribe to head changes _before_ loading local messages
|
||||||
log.Errorf("loading local messages: %+v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go mp.runLoop()
|
|
||||||
|
|
||||||
mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
|
mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
|
||||||
err := mp.HeadChange(rev, app)
|
err := mp.HeadChange(rev, app)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -202,6 +239,12 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*Messa
|
|||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if err := mp.loadLocal(); err != nil {
|
||||||
|
log.Errorf("loading local messages: %+v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go mp.runLoop()
|
||||||
|
|
||||||
return mp, nil
|
return mp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -254,7 +297,7 @@ func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) verifyMsgBeforePush(m *types.SignedMessage, epoch abi.ChainEpoch) error {
|
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, epoch abi.ChainEpoch) error {
|
||||||
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
|
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
|
||||||
|
|
||||||
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
|
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total()); err != nil {
|
||||||
@ -275,25 +318,12 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
|
|||||||
<-mp.addSema
|
<-mp.addSema
|
||||||
}()
|
}()
|
||||||
|
|
||||||
mp.curTsLk.Lock()
|
|
||||||
curTs := mp.curTs
|
|
||||||
epoch := curTs.Height()
|
|
||||||
mp.curTsLk.Unlock()
|
|
||||||
if err := mp.verifyMsgBeforePush(m, epoch); err != nil {
|
|
||||||
return cid.Undef, err
|
|
||||||
}
|
|
||||||
|
|
||||||
msgb, err := m.Serialize()
|
msgb, err := m.Serialize()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cid.Undef, err
|
return cid.Undef, err
|
||||||
}
|
}
|
||||||
|
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
if mp.curTs != curTs {
|
|
||||||
mp.curTsLk.Unlock()
|
|
||||||
return cid.Undef, ErrTryAgain
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := mp.addTs(m, mp.curTs); err != nil {
|
if err := mp.addTs(m, mp.curTs); err != nil {
|
||||||
mp.curTsLk.Unlock()
|
mp.curTsLk.Unlock()
|
||||||
return cid.Undef, err
|
return cid.Undef, err
|
||||||
@ -316,6 +346,11 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
|
|||||||
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
|
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
|
||||||
|
if err := m.Message.ValidForBlockInclusion(0); err != nil {
|
||||||
|
return xerrors.Errorf("message not valid for block inclusion: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
if m.Message.To == address.Undef {
|
if m.Message.To == address.Undef {
|
||||||
return ErrInvalidToAddr
|
return ErrInvalidToAddr
|
||||||
}
|
}
|
||||||
@ -324,8 +359,12 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
|
|||||||
return ErrMessageValueTooHigh
|
return ErrMessageValueTooHigh
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if m.Message.GasFeeCap.LessThan(minimumBaseFee) {
|
||||||
|
return ErrGasFeeCapTooLow
|
||||||
|
}
|
||||||
|
|
||||||
if err := mp.VerifyMsgSig(m); err != nil {
|
if err := mp.VerifyMsgSig(m); err != nil {
|
||||||
log.Warnf("mpooladd signature verification failed: %s", err)
|
log.Warnf("signature verification failed: %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -385,48 +424,71 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error {
|
||||||
|
balance, err := mp.getStateBalance(m.Message.From, curTs)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
|
||||||
|
}
|
||||||
|
|
||||||
|
requiredFunds := m.Message.RequiredFunds()
|
||||||
|
if balance.LessThan(requiredFunds) {
|
||||||
|
return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrNotEnoughFunds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add Value for soft failure check
|
||||||
|
//requiredFunds = types.BigAdd(requiredFunds, m.Message.Value)
|
||||||
|
|
||||||
|
mset, ok := mp.pending[m.Message.From]
|
||||||
|
if ok {
|
||||||
|
requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce))
|
||||||
|
}
|
||||||
|
|
||||||
|
if balance.LessThan(requiredFunds) {
|
||||||
|
// Note: we fail here for ErrSoftValidationFailure to signal a soft failure because we might
|
||||||
|
// be out of sync.
|
||||||
|
return xerrors.Errorf("not enough funds including pending messages (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrSoftValidationFailure)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error {
|
func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error {
|
||||||
snonce, err := mp.getStateNonce(m.Message.From, curTs)
|
snonce, err := mp.getStateNonce(m.Message.From, curTs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrBroadcastAnyway)
|
return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
|
||||||
}
|
}
|
||||||
|
|
||||||
if snonce > m.Message.Nonce {
|
if snonce > m.Message.Nonce {
|
||||||
return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
|
return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
|
||||||
}
|
}
|
||||||
|
|
||||||
balance, err := mp.getStateBalance(m.Message.From, curTs)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrBroadcastAnyway)
|
|
||||||
}
|
|
||||||
|
|
||||||
if balance.LessThan(m.Message.RequiredFunds()) {
|
|
||||||
return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(m.Message.RequiredFunds()), types.FIL(balance), ErrNotEnoughFunds)
|
|
||||||
}
|
|
||||||
|
|
||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
return mp.addLocked(m)
|
if err := mp.verifyMsgBeforeAdd(m, curTs.Height()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := mp.checkBalance(m, curTs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return mp.addLocked(m, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
|
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
|
||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
return mp.addLocked(m)
|
return mp.addLocked(m, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) addLocked(m *types.SignedMessage) error {
|
func (mp *MessagePool) addLocked(m *types.SignedMessage, limit bool) error {
|
||||||
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
|
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
|
||||||
if m.Signature.Type == crypto.SigTypeBLS {
|
if m.Signature.Type == crypto.SigTypeBLS {
|
||||||
mp.blsSigCache.Add(m.Cid(), m.Signature)
|
mp.blsSigCache.Add(m.Cid(), m.Signature)
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Message.GasLimit > build.BlockGasLimit {
|
|
||||||
return xerrors.Errorf("given message has too high of a gas limit")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := mp.api.PutMessage(m); err != nil {
|
if _, err := mp.api.PutMessage(m); err != nil {
|
||||||
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
|
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
|
||||||
return err
|
return err
|
||||||
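Caller-side handling is not part of this commit, but the sentinel errors introduced above (ErrGasFeeCapTooLow, ErrTooManyPendingMessages, ErrSoftValidationFailure) can be matched with xerrors.Is; a hypothetical helper inside the messagepool package might look like this:

func pushWithRetryHint(mp *MessagePool, smsg *types.SignedMessage) error {
	_, err := mp.Push(smsg)
	switch {
	case err == nil:
		return nil
	case xerrors.Is(err, ErrGasFeeCapTooLow):
		return xerrors.Errorf("re-estimate fees and retry: %w", err)
	case xerrors.Is(err, ErrTooManyPendingMessages):
		return xerrors.Errorf("sender already has a full queue, back off: %w", err)
	case xerrors.Is(err, ErrSoftValidationFailure):
		return xerrors.Errorf("node may be out of sync, retry later: %w", err)
	default:
		return err
	}
}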
@ -443,7 +505,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage) error {
|
|||||||
mp.pending[m.Message.From] = mset
|
mp.pending[m.Message.From] = mset
|
||||||
}
|
}
|
||||||
|
|
||||||
incr, err := mset.add(m, mp)
|
incr, err := mset.add(m, mp, limit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Info(err)
|
log.Info(err)
|
||||||
return err
|
return err
|
||||||
@ -498,42 +560,16 @@ func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) {
|
func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) {
|
||||||
// TODO: this method probably should be cached
|
act, err := mp.api.GetActorAfter(addr, curTs)
|
||||||
|
|
||||||
act, err := mp.api.StateGetActor(addr, curTs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
baseNonce := act.Nonce
|
return act.Nonce, nil
|
||||||
|
|
||||||
// TODO: the correct thing to do here is probably to set curTs to chain.head
|
|
||||||
// but since we have an accurate view of the world until a head change occurs,
|
|
||||||
// this should be fine
|
|
||||||
if curTs == nil {
|
|
||||||
return baseNonce, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
msgs, err := mp.api.MessagesForTipset(curTs)
|
|
||||||
if err != nil {
|
|
||||||
return 0, xerrors.Errorf("failed to check messages for tipset: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, m := range msgs {
|
|
||||||
msg := m.VMMessage()
|
|
||||||
if msg.From == addr {
|
|
||||||
if msg.Nonce != baseNonce {
|
|
||||||
return 0, xerrors.Errorf("tipset %s has bad nonce ordering (%d != %d)", curTs.Cids(), msg.Nonce, baseNonce)
|
|
||||||
}
|
|
||||||
baseNonce++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return baseNonce, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
|
func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) {
|
||||||
act, err := mp.api.StateGetActor(addr, ts)
|
act, err := mp.api.GetActorAfter(addr, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.EmptyInt, err
|
return types.EmptyInt, err
|
||||||
}
|
}
|
||||||
@ -580,6 +616,16 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = mp.checkMessage(msg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
msgb, err := msg.Serialize()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
// reacquire the locks and check state for consistency
|
// reacquire the locks and check state for consistency
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
defer mp.curTsLk.Unlock()
|
defer mp.curTsLk.Unlock()
|
||||||
@ -600,16 +646,15 @@ func (mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address,
|
|||||||
return nil, ErrTryAgain
|
return nil, ErrTryAgain
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.verifyMsgBeforePush(msg, mp.curTs.Height()); err != nil {
|
if err := mp.verifyMsgBeforeAdd(msg, curTs.Height()); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
msgb, err := msg.Serialize()
|
if err := mp.checkBalance(msg, curTs); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.addLocked(msg); err != nil {
|
if err := mp.addLocked(msg, true); err != nil {
|
||||||
return nil, xerrors.Errorf("add locked failed: %w", err)
|
return nil, xerrors.Errorf("add locked failed: %w", err)
|
||||||
}
|
}
|
||||||
if err := mp.addLocal(msg, msgb); err != nil {
|
if err := mp.addLocal(msg, msgb); err != nil {
|
||||||
@ -643,7 +688,7 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64) {
|
|||||||
|
|
||||||
// NB: This deletes any message with the given nonce. This makes sense
|
// NB: This deletes any message with the given nonce. This makes sense
|
||||||
// as two messages with the same sender cannot have the same nonce
|
// as two messages with the same sender cannot have the same nonce
|
||||||
delete(mset.msgs, nonce)
|
mset.rm(nonce)
|
||||||
|
|
||||||
if len(mset.msgs) == 0 {
|
if len(mset.msgs) == 0 {
|
||||||
delete(mp.pending, from)
|
delete(mp.pending, from)
|
||||||
@ -669,6 +714,10 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) {
|
|||||||
mp.lk.Lock()
|
mp.lk.Lock()
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
|
return mp.allPending()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) {
|
||||||
out := make([]*types.SignedMessage, 0)
|
out := make([]*types.SignedMessage, 0)
|
||||||
for a := range mp.pending {
|
for a := range mp.pending {
|
||||||
out = append(out, mp.pendingFor(a)...)
|
out = append(out, mp.pendingFor(a)...)
|
||||||
@ -676,6 +725,7 @@ func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) {
|
|||||||
|
|
||||||
return out, mp.curTs
|
return out, mp.curTs
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) {
|
func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) {
|
||||||
mp.curTsLk.Lock()
|
mp.curTsLk.Lock()
|
||||||
defer mp.curTsLk.Unlock()
|
defer mp.curTsLk.Unlock()
|
||||||
@ -744,30 +794,42 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var merr error
|
||||||
|
|
||||||
for _, ts := range revert {
|
for _, ts := range revert {
|
||||||
pts, err := mp.api.LoadTipSet(ts.Parents())
|
pts, err := mp.api.LoadTipSet(ts.Parents())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Errorf("error loading reverted tipset parent: %s", err)
|
||||||
}
|
merr = multierror.Append(merr, err)
|
||||||
|
continue
|
||||||
msgs, err := mp.MessagesForBlocks(ts.Blocks())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
mp.curTs = pts
|
mp.curTs = pts
|
||||||
|
|
||||||
|
msgs, err := mp.MessagesForBlocks(ts.Blocks())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error retrieving messages for reverted block: %s", err)
|
||||||
|
merr = multierror.Append(merr, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
for _, msg := range msgs {
|
for _, msg := range msgs {
|
||||||
add(msg)
|
add(msg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, ts := range apply {
|
for _, ts := range apply {
|
||||||
|
mp.curTs = ts
|
||||||
|
|
||||||
for _, b := range ts.Blocks() {
|
for _, b := range ts.Blocks() {
|
||||||
bmsgs, smsgs, err := mp.api.MessagesForBlock(b)
|
bmsgs, smsgs, err := mp.api.MessagesForBlock(b)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
|
xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
|
||||||
|
log.Errorf("error retrieving messages for block: %s", xerr)
|
||||||
|
merr = multierror.Append(merr, xerr)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, msg := range smsgs {
|
for _, msg := range smsgs {
|
||||||
rm(msg.Message.From, msg.Message.Nonce)
|
rm(msg.Message.From, msg.Message.Nonce)
|
||||||
maybeRepub(msg.Cid())
|
maybeRepub(msg.Cid())
|
||||||
@ -778,8 +840,6 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
maybeRepub(msg.Cid())
|
maybeRepub(msg.Cid())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mp.curTs = ts
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if repubTrigger {
|
if repubTrigger {
|
||||||
@ -798,7 +858,9 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(revert) > 0 && futureDebug {
|
if len(revert) > 0 && futureDebug {
|
||||||
msgs, ts := mp.Pending()
|
mp.lk.Lock()
|
||||||
|
msgs, ts := mp.allPending()
|
||||||
|
mp.lk.Unlock()
|
||||||
|
|
||||||
buckets := map[address.Address]*statBucket{}
|
buckets := map[address.Address]*statBucket{}
|
||||||
|
|
||||||
@ -815,7 +877,8 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
}
|
}
|
||||||
|
|
||||||
for a, bkt := range buckets {
|
for a, bkt := range buckets {
|
||||||
act, err := mp.api.StateGetActor(a, ts)
|
// TODO that might not be correct with GetActorAfter but it is only debug code
|
||||||
|
act, err := mp.api.GetActorAfter(a, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("%s, err: %s\n", a, err)
|
log.Debugf("%s, err: %s\n", a, err)
|
||||||
continue
|
continue
|
||||||
@ -862,7 +925,72 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return merr
|
||||||
|
}
|
||||||
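Editor note: the revert/apply loops in HeadChange now log each failure, append it to an accumulated error, and continue, instead of aborting the whole head change on the first bad block, and the function returns merr at the end. A minimal, self-contained sketch of that accumulate-and-continue pattern, assuming github.com/hashicorp/go-multierror is the multierror package in use; processBlock and the inputs are illustrative.

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func processBlock(id int) error {
	if id%2 == 0 {
		return fmt.Errorf("block %d: failed to load messages", id)
	}
	return nil
}

func processAll(ids []int) error {
	var merr error
	for _, id := range ids {
		if err := processBlock(id); err != nil {
			// keep going; the caller still sees every failure at the end
			merr = multierror.Append(merr, err)
			continue
		}
	}
	return merr // nil if nothing failed
}

func main() {
	fmt.Println(processAll([]int{1, 2, 3, 4}))
}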
|
|
||||||
|
func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error {
|
||||||
|
add := func(m *types.SignedMessage) {
|
||||||
|
s, ok := rmsgs[m.Message.From]
|
||||||
|
if !ok {
|
||||||
|
s = make(map[uint64]*types.SignedMessage)
|
||||||
|
rmsgs[m.Message.From] = s
|
||||||
|
}
|
||||||
|
s[m.Message.Nonce] = m
|
||||||
|
}
|
||||||
|
rm := func(from address.Address, nonce uint64) {
|
||||||
|
s, ok := rmsgs[from]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := s[nonce]; ok {
|
||||||
|
delete(s, nonce)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var merr error
|
||||||
|
|
||||||
|
for _, ts := range revert {
|
||||||
|
msgs, err := mp.MessagesForBlocks(ts.Blocks())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error retrieving messages for reverted block: %s", err)
|
||||||
|
merr = multierror.Append(merr, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msg := range msgs {
|
||||||
|
add(msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ts := range apply {
|
||||||
|
for _, b := range ts.Blocks() {
|
||||||
|
bmsgs, smsgs, err := mp.api.MessagesForBlock(b)
|
||||||
|
if err != nil {
|
||||||
|
xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
|
||||||
|
log.Errorf("error retrieving messages for block: %s", xerr)
|
||||||
|
merr = multierror.Append(merr, xerr)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msg := range smsgs {
|
||||||
|
rm(msg.Message.From, msg.Message.Nonce)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msg := range bmsgs {
|
||||||
|
rm(msg.From, msg.Nonce)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return merr
|
||||||
}
|
}
|
||||||
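Editor note: runHeadChange above reconciles a pending-message map against a different head, it computes the revert/apply sets with store.ReorgOps, puts messages from reverted blocks back into the map, and drops messages already included on the way to the target tipset. A hedged usage sketch as a hypothetical helper inside the messagepool package, mirroring how getPendingMessages later in this diff drives it; the helper name is an assumption, the fields and calls appear in the diff.

// hypothetical helper: what would be pending if the head were ts (sketch only)
func (mp *MessagePool) pendingAt(ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) {
	rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage)
	for a, mset := range mp.pending {
		cp := make(map[uint64]*types.SignedMessage, len(mset.msgs))
		for nonce, m := range mset.msgs {
			cp[nonce] = m
		}
		rmsgs[a] = cp
	}
	// revert back to the common ancestor and re-apply toward ts:
	// reverted messages go back into rmsgs, included ones are removed
	if err := mp.runHeadChange(mp.curTs, ts, rmsgs); err != nil {
		return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err)
	}
	return rmsgs, nil
}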
|
|
||||||
type statBucket struct {
|
type statBucket struct {
|
||||||
@ -915,6 +1043,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
|
|||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
defer mp.changes.Unsub(sub, localUpdates)
|
defer mp.changes.Unsub(sub, localUpdates)
|
||||||
|
defer close(out)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@ -923,9 +1052,13 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
|
|||||||
case out <- u.(api.MpoolUpdate):
|
case out <- u.(api.MpoolUpdate):
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
|
case <-mp.closer:
|
||||||
|
return
|
||||||
}
|
}
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
|
case <-mp.closer:
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
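Editor note: the Updates subscription above gains a case <-mp.closer and a defer close(out), so the forwarding goroutine exits and consumers observe a closed channel once the pool shuts down. A self-contained sketch of that pattern with simplified types; the int payload and channel size are assumptions.

package main

import (
	"context"
	"fmt"
	"time"
)

// forward events from in to the returned channel until either the caller's
// context is cancelled or closer is closed (the pool's Close signal).
func updates(ctx context.Context, in <-chan int, closer <-chan struct{}) <-chan int {
	out := make(chan int, 16)
	go func() {
		defer close(out) // consumers observe shutdown as a closed channel
		for {
			select {
			case v := <-in:
				select {
				case out <- v:
				case <-ctx.Done():
					return
				case <-closer:
					return
				}
			case <-ctx.Done():
				return
			case <-closer:
				return
			}
		}
	}()
	return out
}

func main() {
	in := make(chan int)
	closer := make(chan struct{})
	ch := updates(context.Background(), in, closer)

	close(closer) // simulate MessagePool.Close()
	time.Sleep(10 * time.Millisecond)

	_, ok := <-ch
	fmt.Println("channel open:", ok) // false once the pool has closed
}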
@ -962,3 +1095,40 @@ func (mp *MessagePool) loadLocal() error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (mp *MessagePool) Clear(local bool) {
|
||||||
|
mp.lk.Lock()
|
||||||
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
|
// remove everything if local is true, including removing local messages from
|
||||||
|
// the datastore
|
||||||
|
if local {
|
||||||
|
for a := range mp.localAddrs {
|
||||||
|
mset, ok := mp.pending[a]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range mset.msgs {
|
||||||
|
err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes())))
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("error deleting local message: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.pending = make(map[address.Address]*msgSet)
|
||||||
|
mp.republished = nil
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove everything except the local messages
|
||||||
|
for a := range mp.pending {
|
||||||
|
_, isLocal := mp.localAddrs[a]
|
||||||
|
if isLocal {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
delete(mp.pending, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -3,9 +3,11 @@ package messagepool
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||||
"github.com/filecoin-project/lotus/chain/wallet"
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
@ -30,14 +32,25 @@ type testMpoolAPI struct {
|
|||||||
balance map[address.Address]types.BigInt
|
balance map[address.Address]types.BigInt
|
||||||
|
|
||||||
tipsets []*types.TipSet
|
tipsets []*types.TipSet
|
||||||
|
|
||||||
|
published int
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestMpoolAPI() *testMpoolAPI {
|
func newTestMpoolAPI() *testMpoolAPI {
|
||||||
return &testMpoolAPI{
|
tma := &testMpoolAPI{
|
||||||
bmsgs: make(map[cid.Cid][]*types.SignedMessage),
|
bmsgs: make(map[cid.Cid][]*types.SignedMessage),
|
||||||
statenonce: make(map[address.Address]uint64),
|
statenonce: make(map[address.Address]uint64),
|
||||||
balance: make(map[address.Address]types.BigInt),
|
balance: make(map[address.Address]types.BigInt),
|
||||||
}
|
}
|
||||||
|
genesis := mock.MkBlock(nil, 1, 1)
|
||||||
|
tma.tipsets = append(tma.tipsets, mock.TipSet(genesis))
|
||||||
|
return tma
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tma *testMpoolAPI) nextBlock() *types.BlockHeader {
|
||||||
|
newBlk := mock.MkBlock(tma.tipsets[len(tma.tipsets)-1], 1, 1)
|
||||||
|
tma.tipsets = append(tma.tipsets, mock.TipSet(newBlk))
|
||||||
|
return newBlk
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
|
func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) {
|
||||||
@ -68,12 +81,11 @@ func (tma *testMpoolAPI) setBalanceRaw(addr address.Address, v types.BigInt) {
|
|||||||
|
|
||||||
func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) {
|
func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) {
|
||||||
tma.bmsgs[h.Cid()] = msgs
|
tma.bmsgs[h.Cid()] = msgs
|
||||||
tma.tipsets = append(tma.tipsets, mock.TipSet(h))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
|
func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
|
||||||
tma.cb = cb
|
tma.cb = cb
|
||||||
return nil
|
return tma.tipsets[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
|
func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
|
||||||
@ -81,18 +93,47 @@ func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
|
func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
|
||||||
|
tma.published++
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tma *testMpoolAPI) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
|
func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
|
||||||
|
// regression check for load bug
|
||||||
|
if ts == nil {
|
||||||
|
panic("GetActorAfter called with nil tipset")
|
||||||
|
}
|
||||||
|
|
||||||
balance, ok := tma.balance[addr]
|
balance, ok := tma.balance[addr]
|
||||||
if !ok {
|
if !ok {
|
||||||
balance = types.NewInt(1000e6)
|
balance = types.NewInt(1000e6)
|
||||||
tma.balance[addr] = balance
|
tma.balance[addr] = balance
|
||||||
}
|
}
|
||||||
|
|
||||||
|
msgs := make([]*types.SignedMessage, 0)
|
||||||
|
for _, b := range ts.Blocks() {
|
||||||
|
for _, m := range tma.bmsgs[b.Cid()] {
|
||||||
|
if m.Message.From == addr {
|
||||||
|
msgs = append(msgs, m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(msgs, func(i, j int) bool {
|
||||||
|
return msgs[i].Message.Nonce < msgs[j].Message.Nonce
|
||||||
|
})
|
||||||
|
|
||||||
|
nonce := tma.statenonce[addr]
|
||||||
|
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Message.Nonce != nonce {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
nonce++
|
||||||
|
}
|
||||||
|
|
||||||
return &types.Actor{
|
return &types.Actor{
|
||||||
Code: builtin.StorageMarketActorCodeID,
|
Code: builtin.StorageMarketActorCodeID,
|
||||||
Nonce: tma.statenonce[addr],
|
Nonce: nonce,
|
||||||
Balance: balance,
|
Balance: balance,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
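Editor note: the reworked GetActorAfter in the test provider derives the actor's nonce as it would be after the given tipset, it starts from the recorded state nonce and walks the address's messages in nonce order, stopping at the first gap. A standalone illustration of that rule; the values are made up.

package main

import "fmt"

// nonce-advance rule used by GetActorAfter in the test provider: start from
// the pre-tipset state nonce and consume the address's messages in nonce
// order, stopping at the first gap.
func nonceAfter(stateNonce uint64, included []uint64) uint64 {
	nonce := stateNonce
	for _, n := range included { // included must be sorted ascending
		if n != nonce {
			break // gap: later messages cannot have been applied
		}
		nonce++
	}
	return nonce
}

func main() {
	fmt.Println(nonceAfter(2, []uint64{2, 3, 5})) // prints 4: nonce 4 is missing, so 5 is not applied
}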
@ -178,7 +219,7 @@ func TestMessagePool(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
a := mock.MkBlock(nil, 1, 1)
|
a := tma.nextBlock()
|
||||||
|
|
||||||
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -204,7 +245,7 @@ func TestMessagePool(t *testing.T) {
|
|||||||
assertNonce(t, mp, sender, 2)
|
assertNonce(t, mp, sender, 2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRevertMessages(t *testing.T) {
|
func TestMessagePoolMessagesInEachBlock(t *testing.T) {
|
||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
|
|
||||||
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
@ -219,8 +260,57 @@ func TestRevertMessages(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
a := mock.MkBlock(nil, 1, 1)
|
a := tma.nextBlock()
|
||||||
b := mock.MkBlock(mock.TipSet(a), 1, 1)
|
|
||||||
|
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
target := mock.Address(1001)
|
||||||
|
|
||||||
|
var msgs []*types.SignedMessage
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
m := mock.MkMessage(sender, target, uint64(i), w)
|
||||||
|
msgs = append(msgs, m)
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.setStateNonce(sender, 0)
|
||||||
|
|
||||||
|
tma.setBlockMessages(a, msgs[0], msgs[1])
|
||||||
|
tma.applyBlock(t, a)
|
||||||
|
tsa := mock.TipSet(a)
|
||||||
|
|
||||||
|
_, _ = mp.Pending()
|
||||||
|
|
||||||
|
selm, _ := mp.SelectMessages(tsa, 1)
|
||||||
|
if len(selm) == 0 {
|
||||||
|
t.Fatal("should have returned the rest of the messages")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRevertMessages(t *testing.T) {
|
||||||
|
futureDebug = true
|
||||||
|
defer func() {
|
||||||
|
futureDebug = false
|
||||||
|
}()
|
||||||
|
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
|
||||||
|
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a := tma.nextBlock()
|
||||||
|
b := tma.nextBlock()
|
||||||
|
|
||||||
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -254,6 +344,7 @@ func TestRevertMessages(t *testing.T) {
|
|||||||
assertNonce(t, mp, sender, 4)
|
assertNonce(t, mp, sender, 4)
|
||||||
|
|
||||||
p, _ := mp.Pending()
|
p, _ := mp.Pending()
|
||||||
|
fmt.Printf("%+v\n", p)
|
||||||
if len(p) != 3 {
|
if len(p) != 3 {
|
||||||
t.Fatal("expected three messages in mempool")
|
t.Fatal("expected three messages in mempool")
|
||||||
}
|
}
|
||||||
@ -275,13 +366,14 @@ func TestPruningSimple(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
a := mock.MkBlock(nil, 1, 1)
|
a := tma.nextBlock()
|
||||||
tma.applyBlock(t, a)
|
tma.applyBlock(t, a)
|
||||||
|
|
||||||
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
sender, err := w.GenerateKey(crypto.SigTypeBLS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
tma.setBalance(sender, 1) // in FIL
|
||||||
target := mock.Address(1001)
|
target := mock.Address(1001)
|
||||||
|
|
||||||
for i := 0; i < 5; i++ {
|
for i := 0; i < 5; i++ {
|
||||||
@ -308,3 +400,257 @@ func TestPruningSimple(t *testing.T) {
|
|||||||
t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
|
t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLoadLocal(t *testing.T) {
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
msgs := make(map[cid.Cid]struct{})
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
cid, err := mp.Push(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
msgs[cid] = struct{}{}
|
||||||
|
}
|
||||||
|
err = mp.Close()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp, err = New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pmsgs, _ := mp.Pending()
|
||||||
|
if len(msgs) != len(pmsgs) {
|
||||||
|
t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range pmsgs {
|
||||||
|
cid := m.Cid()
|
||||||
|
_, ok := msgs[cid]
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("unknown message")
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(msgs, cid)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) > 0 {
|
||||||
|
t.Fatalf("not all messages were laoded; missing %d messages", len(msgs))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClearAll(t *testing.T) {
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
_, err := mp.Push(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.Clear(true)
|
||||||
|
|
||||||
|
pending, _ := mp.Pending()
|
||||||
|
if len(pending) > 0 {
|
||||||
|
t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClearNonLocal(t *testing.T) {
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
_, err := mp.Push(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.Clear(false)
|
||||||
|
|
||||||
|
pending, _ := mp.Pending()
|
||||||
|
if len(pending) != 10 {
|
||||||
|
t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, m := range pending {
|
||||||
|
if m.Message.From != a1 {
|
||||||
|
t.Fatalf("expected message from %s but got one from %s instead", a1, m.Message.From)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdates(t *testing.T) {
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.TODO())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
ch, err := mp.Updates(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
_, err := mp.Push(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok := <-ch
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("expected update, but got a closed channel instead")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = mp.Close()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, ok := <-ch
|
||||||
|
if ok {
|
||||||
|
t.Fatal("expected closed channel, but got an update instead")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -16,7 +16,7 @@ type Provider interface {
|
|||||||
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
|
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
|
||||||
PutMessage(m types.ChainMsg) (cid.Cid, error)
|
PutMessage(m types.ChainMsg) (cid.Cid, error)
|
||||||
PubSubPublish(string, []byte) error
|
PubSubPublish(string, []byte) error
|
||||||
StateGetActor(address.Address, *types.TipSet) (*types.Actor, error)
|
GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
|
||||||
StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error)
|
StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error)
|
||||||
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
|
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
|
||||||
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
|
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
|
||||||
@ -43,12 +43,17 @@ func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
|
func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
|
||||||
return mpp.ps.Publish(k, v)
|
return mpp.ps.Publish(k, v) //nolint
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
|
func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
|
||||||
var act types.Actor
|
var act types.Actor
|
||||||
return &act, mpp.sm.WithParentState(ts, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
|
stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("computing tipset state for GetActor: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &act, mpp.sm.WithStateTree(stcid, mpp.sm.WithActor(addr, stmgr.GetActor(&act)))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
||||||
|
@ -108,7 +108,7 @@ func (mp *MessagePool) republishPendingMessages() error {
|
|||||||
|
|
||||||
// we can't fit the current chain but there is gas to spare
|
// we can't fit the current chain but there is gas to spare
|
||||||
// trim it and push it down
|
// trim it and push it down
|
||||||
chain.Trim(gasLimit, mp, baseFee, ts, false)
|
chain.Trim(gasLimit, mp, baseFee, ts)
|
||||||
for j := i; j < len(chains)-1; j++ {
|
for j := i; j < len(chains)-1; j++ {
|
||||||
if chains[j].Before(chains[j+1]) {
|
if chains[j].Before(chains[j+1]) {
|
||||||
break
|
break
|
||||||
|
chain/messagepool/repub_test.go (new file, 66 lines)
@ -0,0 +1,66 @@
|
|||||||
|
package messagepool
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
|
||||||
|
"github.com/filecoin-project/lotus/chain/wallet"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/crypto"
|
||||||
|
"github.com/ipfs/go-datastore"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRepubMessages(t *testing.T) {
|
||||||
|
tma := newTestMpoolAPI()
|
||||||
|
ds := datastore.NewMapDatastore()
|
||||||
|
|
||||||
|
mp, err := New(tma, ds, "mptest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||||
|
_, err := mp.Push(m)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tma.published != 10 {
|
||||||
|
t.Fatalf("expected to have published 10 messages, but got %d instead", tma.published)
|
||||||
|
}
|
||||||
|
|
||||||
|
mp.repubTrigger <- struct{}{}
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
|
||||||
|
if tma.published != 20 {
|
||||||
|
t.Fatalf("expected to have published 20 messages, but got %d instead", tma.published)
|
||||||
|
}
|
||||||
|
}
|
@ -14,7 +14,6 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/chain/vm"
|
"github.com/filecoin-project/lotus/chain/vm"
|
||||||
abig "github.com/filecoin-project/specs-actors/actors/abi/big"
|
abig "github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
|
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
|
||||||
@ -218,7 +217,7 @@ tailLoop:
|
|||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim if necessary
|
// trim if necessary
|
||||||
if chains[last].gasLimit > gasLimit {
|
if chains[last].gasLimit > gasLimit {
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts, false)
|
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
@ -285,7 +284,7 @@ tailLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// dependencies fit, just trim it
|
// dependencies fit, just trim it
|
||||||
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts, false)
|
chain.Trim(gasLimit-depGasLimit, mp, baseFee, ts)
|
||||||
last += i
|
last += i
|
||||||
continue tailLoop
|
continue tailLoop
|
||||||
}
|
}
|
||||||
@ -390,7 +389,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
|
|||||||
tailLoop:
|
tailLoop:
|
||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim
|
// trim
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts, false)
|
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
if chains[last].valid {
|
if chains[last].valid {
|
||||||
@ -463,15 +462,27 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(chains) == 0 {
|
||||||
|
return nil, gasLimit
|
||||||
|
}
|
||||||
|
|
||||||
// 2. Sort the chains
|
// 2. Sort the chains
|
||||||
sort.Slice(chains, func(i, j int) bool {
|
sort.Slice(chains, func(i, j int) bool {
|
||||||
return chains[i].Before(chains[j])
|
return chains[i].Before(chains[j])
|
||||||
})
|
})
|
||||||
|
|
||||||
// 3. Merge chains until the block limit; we are willing to include negative performing chains
|
if len(chains) != 0 && chains[0].gasPerf < 0 {
|
||||||
// as these are messages from our own miners
|
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
|
||||||
|
return nil, gasLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
|
||||||
last := len(chains)
|
last := len(chains)
|
||||||
for i, chain := range chains {
|
for i, chain := range chains {
|
||||||
|
if chain.gasPerf < 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
if chain.gasLimit <= gasLimit {
|
if chain.gasLimit <= gasLimit {
|
||||||
gasLimit -= chain.gasLimit
|
gasLimit -= chain.gasLimit
|
||||||
result = append(result, chain.msgs...)
|
result = append(result, chain.msgs...)
|
||||||
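Editor note: the changes above make priority selection return early when there are no chains or when even the best chain has negative gas performance, and stop merging as soon as a negative-performance chain is reached, since the slice is sorted best-first. A simplified, self-contained sketch of that greedy merge; the stand-in types and values are assumptions, and the real code trims and pushes down chains that do not fit rather than stopping.

package main

import "fmt"

// simplified stand-ins for the mpool's msgChain and message types
type msg struct{ nonce uint64 }
type chainT struct {
	gasPerf  float64
	gasLimit int64
	msgs     []msg
}

// greedy merge of priority chains: chains arrive sorted best-first,
// negative-performance chains are never included, and a chain is taken
// whole only if it fits the remaining block gas.
func mergePriority(chains []chainT, gasLimit int64) ([]msg, int64) {
	var result []msg
	for _, c := range chains {
		if c.gasPerf < 0 {
			break // sorted order: all later chains are also negative
		}
		if c.gasLimit > gasLimit {
			break // real code trims the chain and pushes it down instead
		}
		gasLimit -= c.gasLimit
		result = append(result, c.msgs...)
	}
	return result, gasLimit
}

func main() {
	chains := []chainT{
		{gasPerf: 2.0, gasLimit: 30, msgs: []msg{{0}, {1}}},
		{gasPerf: 1.0, gasLimit: 50, msgs: []msg{{0}}},
		{gasPerf: -0.5, gasLimit: 10, msgs: []msg{{2}}},
	}
	out, left := mergePriority(chains, 100)
	fmt.Println(len(out), left) // 3 20
}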
@ -485,8 +496,8 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
|||||||
|
|
||||||
tailLoop:
|
tailLoop:
|
||||||
for gasLimit >= minGas && last < len(chains) {
|
for gasLimit >= minGas && last < len(chains) {
|
||||||
// trim, without discarding negative performing messages
|
// trim, discarding negative performing messages
|
||||||
chains[last].Trim(gasLimit, mp, baseFee, ts, true)
|
chains[last].Trim(gasLimit, mp, baseFee, ts)
|
||||||
|
|
||||||
// push down if it hasn't been invalidated
|
// push down if it hasn't been invalidated
|
||||||
if chains[last].valid {
|
if chains[last].valid {
|
||||||
@ -504,6 +515,12 @@ tailLoop:
|
|||||||
if !chain.valid {
|
if !chain.valid {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// if gasPerf < 0 we have no more profitable chains
|
||||||
|
if chain.gasPerf < 0 {
|
||||||
|
break tailLoop
|
||||||
|
}
|
||||||
|
|
||||||
// does it fit in the block?
|
// does it fit in the block?
|
||||||
if chain.gasLimit <= gasLimit {
|
if chain.gasLimit <= gasLimit {
|
||||||
gasLimit -= chain.gasLimit
|
gasLimit -= chain.gasLimit
|
||||||
@ -516,9 +533,9 @@ tailLoop:
|
|||||||
continue tailLoop
|
continue tailLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
// the merge loop ended after processing all the chains and we probably still have gas to spare
|
// the merge loop ended after processing all the chains and we probably still have gas to spare;
|
||||||
// -- mark the end.
|
// end the loop
|
||||||
last = len(chains)
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
return result, gasLimit
|
return result, gasLimit
|
||||||
@ -528,7 +545,6 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
|
|||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
|
||||||
result := make(map[address.Address]map[uint64]*types.SignedMessage)
|
result := make(map[address.Address]map[uint64]*types.SignedMessage)
|
||||||
haveCids := make(map[cid.Cid]struct{})
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if dt := time.Since(start); dt > time.Millisecond {
|
if dt := time.Since(start); dt > time.Millisecond {
|
||||||
log.Infow("get pending messages done", "took", dt)
|
log.Infow("get pending messages done", "took", dt)
|
||||||
@ -554,10 +570,6 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
|
|||||||
}
|
}
|
||||||
result[a] = msetCopy
|
result[a] = msetCopy
|
||||||
|
|
||||||
// mark the messages as seen
|
|
||||||
for _, m := range mset.msgs {
|
|
||||||
haveCids[m.Cid()] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -566,74 +578,13 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// nope, we need to sync the tipsets
|
if err := mp.runHeadChange(curTs, ts, result); err != nil {
|
||||||
for {
|
return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err)
|
||||||
if curTs.Height() == ts.Height() {
|
}
|
||||||
if curTs.Equals(ts) {
|
|
||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// different blocks in tipsets -- we mark them as seen so that they are not included in
|
|
||||||
// in the message set we return, but *neither me (vyzo) nor why understand why*
|
|
||||||
// this code is also probably completely untested in production, so I am adding a big fat
|
|
||||||
// warning to revisit this case and sanity check this decision.
|
|
||||||
log.Warnf("mpool tipset has same height as target tipset but it's not equal; beware of dragons!")
|
|
||||||
|
|
||||||
have, err := mp.MessagesForBlocks(ts.Blocks())
|
|
||||||
if err != nil {
|
|
||||||
return nil, xerrors.Errorf("error retrieving messages for tipset: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, m := range have {
|
|
||||||
haveCids[m.Cid()] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
msgs, err := mp.MessagesForBlocks(ts.Blocks())
|
|
||||||
if err != nil {
|
|
||||||
return nil, xerrors.Errorf("error retrieving messages for tipset: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, m := range msgs {
|
|
||||||
if _, have := haveCids[m.Cid()]; have {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
haveCids[m.Cid()] = struct{}{}
|
|
||||||
mset, ok := result[m.Message.From]
|
|
||||||
if !ok {
|
|
||||||
mset = make(map[uint64]*types.SignedMessage)
|
|
||||||
result[m.Message.From] = mset
|
|
||||||
}
|
|
||||||
|
|
||||||
other, dupNonce := mset[m.Message.Nonce]
|
|
||||||
if dupNonce {
|
|
||||||
// duplicate nonce, selfishly keep the message with the highest GasPrice
|
|
||||||
// if the gas prices are the same, keep the one with the highest GasLimit
|
|
||||||
switch m.Message.GasPremium.Int.Cmp(other.Message.GasPremium.Int) {
|
|
||||||
case 0:
|
|
||||||
if m.Message.GasLimit > other.Message.GasLimit {
|
|
||||||
mset[m.Message.Nonce] = m
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
mset[m.Message.Nonce] = m
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
mset[m.Message.Nonce] = m
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if curTs.Height() >= ts.Height() {
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ts, err = mp.api.LoadTipSet(ts.Parents())
|
|
||||||
if err != nil {
|
|
||||||
return nil, xerrors.Errorf("error loading parent tipset: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mp *MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *big.Int {
|
func (mp *MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *big.Int {
|
||||||
maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
|
maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee)
|
||||||
if types.BigCmp(maxPremium, msg.Message.GasPremium) < 0 {
|
if types.BigCmp(maxPremium, msg.Message.GasPremium) < 0 {
|
||||||
@ -671,7 +622,12 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
|
|||||||
// cannot exceed the block limit; drop all messages that exceed the limit
|
// cannot exceed the block limit; drop all messages that exceed the limit
|
||||||
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
|
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
|
||||||
// the balance
|
// the balance
|
||||||
a, _ := mp.api.StateGetActor(actor, ts)
|
a, err := mp.api.GetActorAfter(actor, ts)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to load actor state, not building chain for %s: %w", actor, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
curNonce := a.Nonce
|
curNonce := a.Nonce
|
||||||
balance := a.Balance.Int
|
balance := a.Balance.Int
|
||||||
gasLimit := int64(0)
|
gasLimit := int64(0)
|
||||||
@ -817,9 +773,9 @@ func (mc *msgChain) Before(other *msgChain) bool {
|
|||||||
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet, priority bool) {
|
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt, ts *types.TipSet) {
|
||||||
i := len(mc.msgs) - 1
|
i := len(mc.msgs) - 1
|
||||||
for i >= 0 && (mc.gasLimit > gasLimit || (!priority && mc.gasPerf < 0)) {
|
for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
|
||||||
gasReward := mp.getGasReward(mc.msgs[i], baseFee, ts)
|
gasReward := mp.getGasReward(mc.msgs[i], baseFee, ts)
|
||||||
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
|
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
|
||||||
mc.gasLimit -= mc.msgs[i].Message.GasLimit
|
mc.gasLimit -= mc.msgs[i].Message.GasLimit
|
||||||
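Editor note: with the priority flag removed, Trim above always pops messages off the tail of the chain while the chain exceeds the gas limit or its overall gas performance is negative, subtracting each popped message's reward and gas from the chain totals. A standalone sketch of that tail-trimming loop with simplified fields; the reward-per-gas performance formula here is an assumption, the real version computes gasPerf differently and also invalidates emptied chains.

package main

import "fmt"

type tmsg struct {
	gasLimit int64
	reward   int64
}

type tchain struct {
	msgs     []tmsg
	gasLimit int64
	gasPerf  float64
	reward   int64
	valid    bool
}

// pop messages off the tail while the chain exceeds the block gas limit or
// performs negatively overall, mirroring the Trim loop above.
func (c *tchain) trim(gasLimit int64) {
	i := len(c.msgs) - 1
	for i >= 0 && (c.gasLimit > gasLimit || c.gasPerf < 0) {
		c.reward -= c.msgs[i].reward
		c.gasLimit -= c.msgs[i].gasLimit
		if c.gasLimit > 0 {
			// assumption: performance approximated as reward per unit gas
			c.gasPerf = float64(c.reward) / float64(c.gasLimit)
		} else {
			c.gasPerf = 0
		}
		i--
	}
	if i < 0 {
		c.msgs = nil
		c.valid = false // nothing left; the chain drops out of selection
	} else {
		c.msgs = c.msgs[:i+1]
	}
}

func main() {
	c := &tchain{
		msgs:     []tmsg{{10, 30}, {10, 20}, {10, 1}},
		gasLimit: 30,
		reward:   51,
		gasPerf:  1.7,
		valid:    true,
	}
	c.trim(25) // block can only take 25 more gas
	fmt.Println(len(c.msgs), c.gasLimit, c.valid) // 2 20 true
}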
|
@ -23,6 +23,11 @@ import (
|
|||||||
logging "github.com/ipfs/go-log"
|
logging "github.com/ipfs/go-log"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// bump this for the selection tests
|
||||||
|
MaxActorPendingMessages = 1000000
|
||||||
|
}
|
||||||
|
|
||||||
func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage {
|
func makeTestMessage(w *wallet.Wallet, from, to address.Address, nonce uint64, gasLimit int64, gasPrice uint64) *types.SignedMessage {
|
||||||
msg := &types.Message{
|
msg := &types.Message{
|
||||||
From: from,
|
From: from,
|
||||||
@ -79,7 +84,7 @@ func TestMessageChains(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
|
|
||||||
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
@ -317,7 +322,7 @@ func TestMessageChainSkipping(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
|
|
||||||
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
@ -387,7 +392,7 @@ func TestBasicMessageSelection(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -440,12 +445,12 @@ func TestBasicMessageSelection(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// now we make a block with all the messages and advance the chain
|
// now we make a block with all the messages and advance the chain
|
||||||
block2 := mock.MkBlock(ts, 2, 2)
|
block2 := tma.nextBlock()
|
||||||
tma.setBlockMessages(block2, msgs...)
|
tma.setBlockMessages(block2, msgs...)
|
||||||
tma.applyBlock(t, block2)
|
tma.applyBlock(t, block2)
|
||||||
|
|
||||||
// we should have no pending messages in the mpool
|
// we should have no pending messages in the mpool
|
||||||
pend, ts2 := mp.Pending()
|
pend, _ := mp.Pending()
|
||||||
if len(pend) != 0 {
|
if len(pend) != 0 {
|
||||||
t.Fatalf("expected no pending messages, but got %d", len(pend))
|
t.Fatalf("expected no pending messages, but got %d", len(pend))
|
||||||
}
|
}
|
||||||
@ -458,13 +463,13 @@ func TestBasicMessageSelection(t *testing.T) {
|
|||||||
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
||||||
msgs = append(msgs, m)
|
msgs = append(msgs, m)
|
||||||
}
|
}
|
||||||
block3 := mock.MkBlock(ts2, 3, 3)
|
block3 := tma.nextBlock()
|
||||||
tma.setBlockMessages(block3, msgs...)
|
tma.setBlockMessages(block3, msgs...)
|
||||||
ts3 := mock.TipSet(block3)
|
ts3 := mock.TipSet(block3)
|
||||||
|
|
||||||
// now create another set of messages and add them to the mpool
|
// now create another set of messages and add them to the mpool
|
||||||
for i := 20; i < 30; i++ {
|
for i := 20; i < 30; i++ {
|
||||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+1))
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(2*i+200))
|
||||||
mustAdd(t, mp, m)
|
mustAdd(t, mp, m)
|
||||||
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(i+1))
|
||||||
mustAdd(t, mp, m)
|
mustAdd(t, mp, m)
|
||||||
@ -480,12 +485,12 @@ func TestBasicMessageSelection(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if len(msgs) != 40 {
|
if len(msgs) != 20 {
|
||||||
t.Fatalf("expected 40 messages, got %d", len(msgs))
|
t.Fatalf("expected 20 messages, got %d", len(msgs))
|
||||||
}
|
}
|
||||||
|
|
||||||
nextNonce = 10
|
nextNonce = 20
|
||||||
for i := 0; i < 20; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
if msgs[i].Message.From != a1 {
|
if msgs[i].Message.From != a1 {
|
||||||
t.Fatalf("expected message from actor a1")
|
t.Fatalf("expected message from actor a1")
|
||||||
}
|
}
|
||||||
@ -495,8 +500,8 @@ func TestBasicMessageSelection(t *testing.T) {
|
|||||||
nextNonce++
|
nextNonce++
|
||||||
}
|
}
|
||||||
|
|
||||||
nextNonce = 10
|
nextNonce = 20
|
||||||
for i := 20; i < 40; i++ {
|
for i := 10; i < 20; i++ {
|
||||||
if msgs[i].Message.From != a2 {
|
if msgs[i].Message.From != a2 {
|
||||||
t.Fatalf("expected message from actor a2")
|
t.Fatalf("expected message from actor a2")
|
||||||
}
|
}
|
||||||
@ -531,7 +536,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -594,7 +599,7 @@ func TestPriorityMessageSelection(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -649,6 +654,73 @@ func TestPriorityMessageSelection(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPriorityMessageSelection2(t *testing.T) {
|
||||||
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
|
// the actors
|
||||||
|
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a1, err := w1.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a2, err := w2.GenerateKey(crypto.SigTypeSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
block := tma.nextBlock()
|
||||||
|
ts := mock.TipSet(block)
|
||||||
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
|
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin.StorageMarketActorCodeID, M: 2}]
|
||||||
|
|
||||||
|
tma.setBalance(a1, 1) // in FIL
|
||||||
|
tma.setBalance(a2, 1) // in FIL
|
||||||
|
|
||||||
|
mp.cfg.PriorityAddrs = []address.Address{a1}
|
||||||
|
|
||||||
|
nMessages := int(2 * build.BlockGasLimit / gasLimit)
|
||||||
|
for i := 0; i < nMessages; i++ {
|
||||||
|
bias := (nMessages - i) / 3
|
||||||
|
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias))
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias))
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs, err := mp.SelectMessages(ts, 1.0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedMsgs := int(build.BlockGasLimit / gasLimit)
|
||||||
|
if len(msgs) != expectedMsgs {
|
||||||
|
t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// all messages must be from a1
|
||||||
|
nextNonce := uint64(0)
|
||||||
|
for _, m := range msgs {
|
||||||
|
if m.Message.From != a1 {
|
||||||
|
t.Fatal("expected messages from a1 before messages from a2")
|
||||||
|
}
|
||||||
|
if m.Message.Nonce != nextNonce {
|
||||||
|
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
|
||||||
|
}
|
||||||
|
nextNonce++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestOptimalMessageSelection1(t *testing.T) {
|
func TestOptimalMessageSelection1(t *testing.T) {
|
||||||
// this test uses just a single actor sending messages with a low tq
|
// this test uses just a single actor sending messages with a low tq
|
||||||
// the chain dependent merging algorithm should pick messages from the actor
|
// the chain dependent merging algorithm should pick messages from the actor
|
||||||
@ -676,7 +748,7 @@ func TestOptimalMessageSelection1(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -743,7 +815,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -821,7 +893,7 @@ func TestOptimalMessageSelection3(t *testing.T) {
|
|||||||
wallets = append(wallets, w)
|
wallets = append(wallets, w)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
@ -879,7 +951,6 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
|
|||||||
// actors send with a randomly distributed premium dictated by the getPremium function.
|
// actors send with a randomly distributed premium dictated by the getPremium function.
|
||||||
// a number of miners select with varying ticket quality and we compare the
|
// a number of miners select with varying ticket quality and we compare the
|
||||||
// capacity and rewards of greedy selection -vs- optimal selection
|
// capacity and rewards of greedy selection -vs- optimal selection
|
||||||
|
|
||||||
mp, tma := makeTestMpool()
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
nActors := 300
|
nActors := 300
|
||||||
@ -902,7 +973,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
|
|||||||
wallets = append(wallets, w)
|
wallets = append(wallets, w)
|
||||||
}
|
}
|
||||||
|
|
||||||
block := mock.MkBlock(nil, 1, 1)
|
block := tma.nextBlock()
|
||||||
ts := mock.TipSet(block)
|
ts := mock.TipSet(block)
|
||||||
tma.applyBlock(t, block)
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ func SendHeadNotifs(nickname string) func(mctx helpers.MetricsCtx, lc fx.Lifecyc
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
go func() {
|
go func() {
|
||||||
sub, err := ps.Subscribe(topic)
|
sub, err := ps.Subscribe(topic) //nolint
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -116,6 +116,7 @@ func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//nolint
|
||||||
if err := ps.Publish(topic, b); err != nil {
|
if err := ps.Publish(topic, b); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -246,7 +246,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
|
func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "stateTree.Flush")
|
ctx, span := trace.StartSpan(ctx, "stateTree.Flush") //nolint:staticcheck
|
||||||
defer span.End()
|
defer span.End()
|
||||||
if len(st.snaps.layers) != 1 {
|
if len(st.snaps.layers) != 1 {
|
||||||
return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
|
return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
|
||||||
@ -268,7 +268,7 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTree) Snapshot(ctx context.Context) error {
|
func (st *StateTree) Snapshot(ctx context.Context) error {
|
||||||
ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot")
|
ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") //nolint:staticcheck
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
st.snaps.addLayer()
|
st.snaps.addLayer()
|
||||||
|
@ -95,7 +95,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
|
|||||||
|
|
||||||
state := ts.ParentState()
|
state := ts.ParentState()
|
||||||
|
|
||||||
r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
|
r := store.NewChainRand(sm.cs, ts.Cids())
|
||||||
|
|
||||||
return sm.CallRaw(ctx, msg, state, r, ts.Height())
|
return sm.CallRaw(ctx, msg, state, r, ts.Height())
|
||||||
}
|
}
|
||||||
@ -113,7 +113,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
|
|||||||
return nil, xerrors.Errorf("computing tipset state: %w", err)
|
return nil, xerrors.Errorf("computing tipset state: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
|
r := store.NewChainRand(sm.cs, ts.Cids())
|
||||||
|
|
||||||
if span.IsRecordingEvents() {
|
if span.IsRecordingEvents() {
|
||||||
span.AddAttributes(
|
span.AddAttributes(
|
||||||
|
@ -338,7 +338,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
|
|||||||
cids[i] = v.Cid()
|
cids[i] = v.Cid()
|
||||||
}
|
}
|
||||||
|
|
||||||
r := store.NewChainRand(sm.cs, cids, blks[0].Height)
|
r := store.NewChainRand(sm.cs, cids)
|
||||||
|
|
||||||
blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
|
blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -157,7 +157,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, xerrors.New("sector not found")
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return sectorInfo, nil
|
return sectorInfo, nil
|
||||||
@ -432,7 +432,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
|
|||||||
return cid.Undef, nil, err
|
return cid.Undef, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
r := store.NewChainRand(sm.cs, ts.Cids(), height)
|
r := store.NewChainRand(sm.cs, ts.Cids())
|
||||||
vmopt := &vm.VMOpts{
|
vmopt := &vm.VMOpts{
|
||||||
StateBase: base,
|
StateBase: base,
|
||||||
Epoch: height,
|
Epoch: height,
|
||||||
|
@ -42,7 +42,7 @@ func TestIndexSeeks(t *testing.T) {
|
|||||||
if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
|
if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
cs.SetGenesis(gen)
|
assert.NoError(t, cs.SetGenesis(gen))
|
||||||
|
|
||||||
// Put 113 blocks from genesis
|
// Put 113 blocks from genesis
|
||||||
for i := 0; i < 113; i++ {
|
for i := 0; i < 113; i++ {
|
||||||
|
@ -49,6 +49,7 @@ var chainHeadKey = dstore.NewKey("head")
|
|||||||
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
|
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
|
||||||
|
|
||||||
var DefaultTipSetCacheSize = 8192
|
var DefaultTipSetCacheSize = 8192
|
||||||
|
var DefaultMsgMetaCacheSize = 2048
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
|
if s := os.Getenv("LOTUS_CHAIN_TIPSET_CACHE"); s != "" {
|
||||||
@ -58,6 +59,14 @@ func init() {
|
|||||||
}
|
}
|
||||||
DefaultTipSetCacheSize = tscs
|
DefaultTipSetCacheSize = tscs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if s := os.Getenv("LOTUS_CHAIN_MSGMETA_CACHE"); s != "" {
|
||||||
|
mmcs, err := strconv.Atoi(s)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to parse 'LOTUS_CHAIN_MSGMETA_CACHE' env var: %s", err)
|
||||||
|
}
|
||||||
|
DefaultMsgMetaCacheSize = mmcs
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReorgNotifee represents a callback that gets called upon reorgs.
|
// ReorgNotifee represents a callback that gets called upon reorgs.
|
||||||
@ -97,7 +106,7 @@ type ChainStore struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
|
func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls vm.SyscallBuilder) *ChainStore {
|
||||||
c, _ := lru.NewARC(2048)
|
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
|
||||||
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
|
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
|
||||||
cs := &ChainStore{
|
cs := &ChainStore{
|
||||||
bs: bs,
|
bs: bs,
|
||||||
@@ -483,6 +492,10 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet,
 }
 
 func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
+	return ReorgOps(cs.LoadTipSet, a, b)
+}
+
+func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) {
 	left := a
 	right := b
 
@@ -490,7 +503,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
 	for !left.Equals(right) {
 		if left.Height() > right.Height() {
 			leftChain = append(leftChain, left)
-			par, err := cs.LoadTipSet(left.Parents())
+			par, err := lts(left.Parents())
 			if err != nil {
 				return nil, nil, err
 			}
@@ -498,7 +511,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
 			left = par
 		} else {
 			rightChain = append(rightChain, right)
-			par, err := cs.LoadTipSet(right.Parents())
+			par, err := lts(right.Parents())
 			if err != nil {
 				log.Infof("failed to fetch right.Parents: %s", err)
 				return nil, nil, err
@@ -509,6 +522,7 @@ func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.Ti
 		}
 	}
 
 	return leftChain, rightChain, nil
+
 }
 
 // GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head).
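Note: the hunks above extract the reorg walk into an exported ReorgOps that takes a tipset-loading callback, so code outside ChainStore can reuse the traversal. A minimal sketch of calling it with the store's own loader (the wrapper function and package are hypothetical; the ReorgOps signature comes from the diff):

```go
package chainutil // hypothetical helper package

import (
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// ReorgBetween walks two heads back to their common ancestor using the newly
// exported store.ReorgOps, supplying the ChainStore's LoadTipSet as the loader.
// It returns the tipsets unique to each side of the fork.
func ReorgBetween(cs *store.ChainStore, a, b *types.TipSet) (left, right []*types.TipSet, err error) {
	return store.ReorgOps(cs.LoadTipSet, a, b)
}
```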
@@ -829,7 +843,7 @@ type mmCids struct {
 	secpk []cid.Cid
 }
 
-func (cs *ChainStore) readMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
+func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
 	o, ok := cs.mmCache.Get(mmc)
 	if ok {
 		mmcids := o.(*mmCids)
@@ -885,7 +899,7 @@ func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to type
 }
 
 func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
-	blscids, secpkcids, err := cs.readMsgMetaCids(b.Messages)
+	blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -925,7 +939,7 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er
 	for i, c := range cids {
 		m, err := cs.GetMessage(c)
 		if err != nil {
-			return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i)
+			return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
 		}
 
 		msgs = append(msgs, m)
@@ -939,7 +953,7 @@ func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.Signe
 	for i, c := range cids {
 		m, err := cs.GetSignedMessage(c)
 		if err != nil {
-			return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i)
+			return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err)
 		}
 
 		msgs = append(msgs, m)
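Note: the two Errorf fixes above are not cosmetic; with xerrors the arguments are positional, so %w has to receive the error for wrapping (and Is/As checks) to keep working. A small standalone illustration (the CID string and index are made up):

```go
package main

import (
	"fmt"
	"io"

	"golang.org/x/xerrors"
)

func main() {
	c, i := "bafyexamplecid", 3 // hypothetical message CID and index

	// Correct order: (%s) gets the CID, %d the index, and %w the error.
	err := xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, io.EOF)

	fmt.Println(xerrors.Is(err, io.EOF)) // true: the cause is preserved
}
```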
@@ -1109,7 +1123,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
 	return cs.LoadTipSet(lbts.Parents())
 }
 
-func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
+func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
 	if root.Prefix().Codec != cid.DagCBOR {
 		return in, nil
 	}
@@ -1126,9 +1140,14 @@ func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid,
 			return
 		}
 
+		// traversed this already...
+		if !walked.Visit(c) {
+			return
+		}
+
 		in = append(in, c)
 		var err error
-		in, err = recurseLinks(bs, c, in)
+		in, err = recurseLinks(bs, walked, c, in)
 		if err != nil {
 			rerr = err
 		}
@@ -1140,12 +1159,13 @@ func recurseLinks(bs bstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid,
 	return in, rerr
 }
 
-func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer) error {
+func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, w io.Writer) error {
 	if ts == nil {
 		ts = cs.GetHeaviestTipSet()
 	}
 
 	seen := cid.NewSet()
+	walked := cid.NewSet()
 
 	h := &car.CarHeader{
 		Roots: ts.Cids(),
@@ -1177,7 +1197,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer)
 			return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
 		}
 
-		cids, err := recurseLinks(cs.bs, b.Messages, []cid.Cid{b.Messages})
+		cids, err := recurseLinks(cs.bs, walked, b.Messages, []cid.Cid{b.Messages})
 		if err != nil {
 			return xerrors.Errorf("recursing messages failed: %w", err)
 		}
@@ -1193,8 +1213,8 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer)
 
 		out := cids
 
-		if b.Height == 0 {
-			cids, err := recurseLinks(cs.bs, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
+		if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots {
+			cids, err := recurseLinks(cs.bs, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot})
 			if err != nil {
 				return xerrors.Errorf("recursing genesis state failed: %w", err)
 			}
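Note: the walked set threaded through recurseLinks and Export above is what keeps the CAR export from re-walking state subtrees shared between blocks. cid.Set.Visit reports whether a CID is new, as this small sketch shows (the payload bytes are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	walked := cid.NewSet()

	h, _ := mh.Sum([]byte("example payload"), mh.SHA2_256, -1)
	c := cid.NewCidV1(cid.DagCBOR, h)

	fmt.Println(walked.Visit(c)) // true: first time, traverse this link
	fmt.Println(walked.Visit(c)) // false: already walked, skip the subtree
}
```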
@@ -1279,14 +1299,12 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry
 type chainRand struct {
 	cs   *ChainStore
 	blks []cid.Cid
-	bh   abi.ChainEpoch
 }
 
-func NewChainRand(cs *ChainStore, blks []cid.Cid, bheight abi.ChainEpoch) vm.Rand {
+func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
 	return &chainRand{
 		cs:   cs,
 		blks: blks,
-		bh:   bheight,
 	}
 }
 
@@ -96,7 +96,7 @@ func TestChainExportImport(t *testing.T) {
 	}
 
 	buf := new(bytes.Buffer)
-	if err := cg.ChainStore().Export(context.TODO(), last, buf); err != nil {
+	if err := cg.ChainStore().Export(context.TODO(), last, 0, buf); err != nil {
 		t.Fatal(err)
 	}
 
@@ -41,7 +41,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
 
 		var st power.State
 		if err := cst.Get(ctx, act.Head, &st); err != nil {
-			return types.NewInt(0), xerrors.Errorf("get power actor head: %w", err)
+			return types.NewInt(0), xerrors.Errorf("get power actor head (%s, height=%d): %w", act.Head, ts.Height(), err)
		}
 		tpow = st.TotalQualityAdjPower // TODO: REVIEW: Is this correct?
 	}
@ -3,6 +3,7 @@ package sub
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -40,6 +41,9 @@ import (
|
|||||||
|
|
||||||
var log = logging.Logger("sub")
|
var log = logging.Logger("sub")
|
||||||
|
|
||||||
|
var ErrSoftFailure = errors.New("soft validation failure")
|
||||||
|
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
|
||||||
|
|
||||||
func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bserv bserv.BlockService, cmgr connmgr.ConnManager) {
|
func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bserv bserv.BlockService, cmgr connmgr.ConnManager) {
|
||||||
for {
|
for {
|
||||||
msg, err := bsub.Next(ctx)
|
msg, err := bsub.Next(ctx)
|
||||||
@ -258,16 +262,15 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
|
|||||||
|
|
||||||
stats.Record(ctx, metrics.BlockReceived.M(1))
|
stats.Record(ctx, metrics.BlockReceived.M(1))
|
||||||
|
|
||||||
recordFailure := func(what string) {
|
recordFailureFlagPeer := func(what string) {
|
||||||
ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what))
|
recordFailure(ctx, metrics.BlockValidationFailure, what)
|
||||||
stats.Record(ctx, metrics.BlockValidationFailure.M(1))
|
|
||||||
bv.flagPeer(pid)
|
bv.flagPeer(pid)
|
||||||
}
|
}
|
||||||
|
|
||||||
blk, what, err := bv.decodeAndCheckBlock(msg)
|
blk, what, err := bv.decodeAndCheckBlock(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("got invalid block over pubsub: ", err)
|
log.Error("got invalid block over pubsub: ", err)
|
||||||
recordFailure(what)
|
recordFailureFlagPeer(what)
|
||||||
return pubsub.ValidationReject
|
return pubsub.ValidationReject
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -275,7 +278,7 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
|
|||||||
err = bv.validateMsgMeta(ctx, blk)
|
err = bv.validateMsgMeta(ctx, blk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("error validating message metadata: %s", err)
|
log.Warnf("error validating message metadata: %s", err)
|
||||||
recordFailure("invalid_block_meta")
|
recordFailureFlagPeer("invalid_block_meta")
|
||||||
return pubsub.ValidationReject
|
return pubsub.ValidationReject
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -288,26 +291,26 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
|
|||||||
// if we are synced and the miner is unknown, then the block is rejcected.
|
// if we are synced and the miner is unknown, then the block is rejcected.
|
||||||
key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header)
|
key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if bv.isChainNearSynced() {
|
if err != ErrSoftFailure && bv.isChainNearSynced() {
|
||||||
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
|
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
|
||||||
recordFailure("unknown_miner")
|
recordFailureFlagPeer("unknown_miner")
|
||||||
return pubsub.ValidationReject
|
return pubsub.ValidationReject
|
||||||
} else {
|
}
|
||||||
|
|
||||||
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
|
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
err = sigs.CheckBlockSignature(ctx, blk.Header, key)
|
err = sigs.CheckBlockSignature(ctx, blk.Header, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("block signature verification failed: %s", err)
|
log.Errorf("block signature verification failed: %s", err)
|
||||||
recordFailure("signature_verification_failed")
|
recordFailureFlagPeer("signature_verification_failed")
|
||||||
return pubsub.ValidationReject
|
return pubsub.ValidationReject
|
||||||
}
|
}
|
||||||
|
|
||||||
if blk.Header.ElectionProof.WinCount < 1 {
|
if blk.Header.ElectionProof.WinCount < 1 {
|
||||||
log.Errorf("block is not claiming to be winning")
|
log.Errorf("block is not claiming to be winning")
|
||||||
recordFailure("not_winning")
|
recordFailureFlagPeer("not_winning")
|
||||||
return pubsub.ValidationReject
|
return pubsub.ValidationReject
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -474,19 +477,19 @@ func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *typ
|
|||||||
baseTs := bv.chain.GetHeaviestTipSet()
|
baseTs := bv.chain.GetHeaviestTipSet()
|
||||||
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
|
lbts, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to load lookback tipset for incoming block")
|
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
|
||||||
return address.Undef, err
|
return address.Undef, ErrSoftFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
hmp, err := stmgr.MinerHasMinPower(ctx, bv.stmgr, bh.Miner, lbts)
|
hmp, err := stmgr.MinerHasMinPower(ctx, bv.stmgr, bh.Miner, lbts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to determine if incoming block's miner has minimum power")
|
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
|
||||||
return address.Undef, err
|
return address.Undef, ErrSoftFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
if !hmp {
|
if !hmp {
|
||||||
log.Warnf("incoming block's miner does not have minimum power")
|
log.Warnf("incoming block's miner does not have minimum power")
|
||||||
return address.Undef, xerrors.New("incoming block's miner does not have minimum power")
|
return address.Undef, ErrInsufficientPower
|
||||||
}
|
}
|
||||||
|
|
||||||
return key, nil
|
return key, nil
|
||||||
@ -542,14 +545,16 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
|||||||
log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err)
|
log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err)
|
||||||
ctx, _ = tag.New(
|
ctx, _ = tag.New(
|
||||||
ctx,
|
ctx,
|
||||||
tag.Insert(metrics.FailureType, "add"),
|
tag.Upsert(metrics.Local, "false"),
|
||||||
)
|
)
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "add")
|
||||||
switch {
|
switch {
|
||||||
case xerrors.Is(err, messagepool.ErrBroadcastAnyway):
|
case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
|
||||||
fallthrough
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
|
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
|
||||||
fallthrough
|
fallthrough
|
||||||
|
case xerrors.Is(err, messagepool.ErrTooManyPendingMessages):
|
||||||
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
default:
|
default:
|
||||||
@ -561,37 +566,41 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
|
func (mv *MessageValidator) validateLocalMessage(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
|
||||||
|
ctx, _ = tag.New(
|
||||||
|
ctx,
|
||||||
|
tag.Upsert(metrics.Local, "true"),
|
||||||
|
)
|
||||||
// do some lightweight validation
|
// do some lightweight validation
|
||||||
stats.Record(ctx, metrics.MessagePublished.M(1))
|
stats.Record(ctx, metrics.MessagePublished.M(1))
|
||||||
|
|
||||||
m, err := types.DecodeSignedMessage(msg.Message.GetData())
|
m, err := types.DecodeSignedMessage(msg.Message.GetData())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("failed to decode local message: %s", err)
|
log.Warnf("failed to decode local message: %s", err)
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "decode")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Size() > 32*1024 {
|
if m.Size() > 32*1024 {
|
||||||
log.Warnf("local message is too large! (%dB)", m.Size())
|
log.Warnf("local message is too large! (%dB)", m.Size())
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "oversize")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
|
|
||||||
if m.Message.To == address.Undef {
|
if m.Message.To == address.Undef {
|
||||||
log.Warn("local message has invalid destination address")
|
log.Warn("local message has invalid destination address")
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "undef-addr")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
|
|
||||||
if !m.Message.Value.LessThan(types.TotalFilecoinInt) {
|
if !m.Message.Value.LessThan(types.TotalFilecoinInt) {
|
||||||
log.Warnf("local messages has too high value: %s", m.Message.Value)
|
log.Warnf("local messages has too high value: %s", m.Message.Value)
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "value-too-high")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mv.mpool.VerifyMsgSig(m); err != nil {
|
if err := mv.mpool.VerifyMsgSig(m); err != nil {
|
||||||
log.Warnf("signature verification failed for local message: %s", err)
|
log.Warnf("signature verification failed for local message: %s", err)
|
||||||
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
|
recordFailure(ctx, metrics.MessageValidationFailure, "verify-sig")
|
||||||
return pubsub.ValidationIgnore
|
return pubsub.ValidationIgnore
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -614,3 +623,11 @@ func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool,
|
|||||||
// Do nothing... everything happens in validate
|
// Do nothing... everything happens in validate
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType string) {
|
||||||
|
ctx, _ = tag.New(
|
||||||
|
ctx,
|
||||||
|
tag.Upsert(metrics.FailureType, failureType),
|
||||||
|
)
|
||||||
|
stats.Record(ctx, metric.M(1))
|
||||||
|
}
|
||||||
|
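Note: the recordFailure helper added above replaces the repeated tag.New/stats.Record pairs in both validators. A self-contained sketch of the same pattern with a stand-in measure and tag key (the real ones live in the lotus metrics package):

```go
package main

import (
	"context"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

// Stand-ins for metrics.MessageValidationFailure and metrics.FailureType.
var (
	messageValidationFailure = stats.Int64("message/failure", "validation failures", stats.UnitDimensionless)
	failureType              = tag.MustNewKey("failure_type")
)

// recordFailure tags the context with the failure type and bumps the counter,
// the same shape as the helper added in chain/sub/incoming.go.
func recordFailure(ctx context.Context, metric *stats.Int64Measure, what string) {
	ctx, _ = tag.New(ctx, tag.Upsert(failureType, what))
	stats.Record(ctx, metric.M(1))
}

func main() {
	recordFailure(context.Background(), messageValidationFailure, "decode")
}
```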
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"os"
 	"sort"
+	"strconv"
 	"strings"
 	"time"
 
@@ -52,6 +53,19 @@ import (
 //the theoretical max height based on systime are quickly rejected
 const MaxHeightDrift = 5
 
+var defaultMessageFetchWindowSize = 200
+
+func init() {
+	if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {
+		val, err := strconv.Atoi(s)
+		if err != nil {
+			log.Errorf("failed to parse LOTUS_BSYNC_MSG_WINDOW: %s", err)
+			return
+		}
+		defaultMessageFetchWindowSize = val
+	}
+}
+
 var log = logging.Logger("chain")
 
 var LocalIncoming = "incoming"
@@ -109,6 +123,8 @@ type Syncer struct {
 	receiptTracker *blockReceiptTracker
 
 	verifier ffiwrapper.Verifier
+
+	windowSize int
 }
 
 // NewSyncer creates a new Syncer object.
@@ -134,6 +150,7 @@ func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connm
 		receiptTracker: newBlockReceiptTracker(),
 		connmgr:        connmgr,
 		verifier:       verifier,
+		windowSize:     defaultMessageFetchWindowSize,
 
 		incoming: pubsub.New(50),
 	}
@@ -641,7 +658,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (er
 	validationStart := build.Clock.Now()
 	defer func() {
 		stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart)))
-		log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height)
+		log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0)))
 	}()
 
 	ctx, span := trace.StartSpan(ctx, "validateBlock")
@@ -1399,7 +1416,8 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
 
 	span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers))))
 
-	windowSize := 200
+	windowSize := syncer.windowSize
+mainLoop:
 	for i := len(headers) - 1; i >= 0; {
 		fts, err := syncer.store.TryFillTipSet(headers[i])
 		if err != nil {
@@ -1427,6 +1445,12 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
 			nreq := batchSize - len(bstout)
 			bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq))
 			if err != nil {
+				// TODO check errors for temporary nature
+				if windowSize > 1 {
+					windowSize /= 2
+					log.Infof("error fetching messages: %s; reducing window size to %d and trying again", err, windowSize)
+					continue mainLoop
+				}
 				return xerrors.Errorf("message processing failed: %w", err)
 			}
 
@@ -1461,9 +1485,24 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
 				return xerrors.Errorf("message processing failed: %w", err)
 			}
 		}
 
+		if i >= windowSize {
+			newWindowSize := windowSize + 10
+			if newWindowSize > int(blocksync.MaxRequestLength) {
+				newWindowSize = int(blocksync.MaxRequestLength)
+			}
+			if newWindowSize > windowSize {
+				windowSize = newWindowSize
+				log.Infof("successfully fetched %d messages; increasing window size to %d", len(bstout), windowSize)
+			}
+		}
+
 		i -= batchSize
 	}
 
+	// remember our window size
+	syncer.windowSize = windowSize
+
 	return nil
 }
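Note: the sync changes above make the message-fetch window adaptive: it starts from LOTUS_BSYNC_MSG_WINDOW (default 200), halves when a GetChainMessages call fails, grows by 10 per successful batch up to blocksync.MaxRequestLength, and is remembered on the Syncer for the next sync. A standalone sketch of just that policy (the cap value here is a stand-in, not the real constant):

```go
package main

import "fmt"

const maxRequestLength = 800 // stand-in for blocksync.MaxRequestLength

// adjustWindow applies the policy from iterFullTipsets: shrink multiplicatively
// on a failed fetch, grow additively (capped) after a successful one.
func adjustWindow(window int, fetchFailed bool) int {
	if fetchFailed {
		if window > 1 {
			window /= 2
		}
		return window
	}
	window += 10
	if window > maxRequestLength {
		window = maxRequestLength
	}
	return window
}

func main() {
	w := 200
	w = adjustWindow(w, true)  // fetch error: 100
	w = adjustWindow(w, false) // success: 110
	fmt.Println(w)
}
```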
@ -343,12 +343,12 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
|
|||||||
sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket)
|
sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
} else {
|
}
|
||||||
// TODO: this is the case where we try to sync a chain, and
|
// TODO: this is the case where we try to sync a chain, and
|
||||||
// fail, and we have more blocks on top of that chain that
|
// fail, and we have more blocks on top of that chain that
|
||||||
// have come in since. The question is, should we try to
|
// have come in since. The question is, should we try to
|
||||||
// sync these? or just drop them?
|
// sync these? or just drop them?
|
||||||
}
|
log.Error("failed to sync chain but have new unconnected blocks from chain")
|
||||||
}
|
}
|
||||||
|
|
||||||
if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {
|
if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() {
|
||||||
|
@ -3,11 +3,12 @@ package chain_test
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
|
||||||
ds "github.com/ipfs/go-datastore"
|
ds "github.com/ipfs/go-datastore"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/libp2p/go-libp2p-core/peer"
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
@ -36,7 +37,10 @@ import (
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
build.InsecurePoStValidation = true
|
build.InsecurePoStValidation = true
|
||||||
os.Setenv("TRUST_PARAMS", "1")
|
err := os.Setenv("TRUST_PARAMS", "1")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{
|
||||||
abi.RegisteredSealProof_StackedDrg2KiBV1: {},
|
abi.RegisteredSealProof_StackedDrg2KiBV1: {},
|
||||||
}
|
}
|
||||||
@ -212,20 +216,6 @@ func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
|
|||||||
tu.g.CurTipset = mts
|
tu.g.CurTipset = mts
|
||||||
}
|
}
|
||||||
|
|
||||||
func fblkToBlkMsg(fb *types.FullBlock) *types.BlockMsg {
|
|
||||||
out := &types.BlockMsg{
|
|
||||||
Header: fb.Header,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, msg := range fb.BlsMessages {
|
|
||||||
out.BlsMessages = append(out.BlsMessages, msg.Cid())
|
|
||||||
}
|
|
||||||
for _, msg := range fb.SecpkMessages {
|
|
||||||
out.SecpkMessages = append(out.SecpkMessages, msg.Cid())
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tu *syncTestUtil) addSourceNode(gen int) {
|
func (tu *syncTestUtil) addSourceNode(gen int) {
|
||||||
if tu.genesis != nil {
|
if tu.genesis != nil {
|
||||||
tu.t.Fatal("source node already exists")
|
tu.t.Fatal("source node already exists")
|
||||||
@ -454,7 +444,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
|
|||||||
|
|
||||||
func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
|
func (wpp badWpp) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
|
||||||
return []abi.PoStProof{
|
return []abi.PoStProof{
|
||||||
abi.PoStProof{
|
{
|
||||||
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
|
PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
|
||||||
ProofBytes: []byte("evil"),
|
ProofBytes: []byte("evil"),
|
||||||
},
|
},
|
||||||
@ -587,7 +577,7 @@ func TestDuplicateNonce(t *testing.T) {
|
|||||||
|
|
||||||
msgs := make([][]*types.SignedMessage, 2)
|
msgs := make([][]*types.SignedMessage, 2)
|
||||||
// Each miner includes a message from the banker with the same nonce, but to different addresses
|
// Each miner includes a message from the banker with the same nonce, but to different addresses
|
||||||
for k, _ := range msgs {
|
for k := range msgs {
|
||||||
msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
|
msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -6,10 +6,10 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
abi "github.com/filecoin-project/specs-actors/actors/abi"
|
||||||
"github.com/filecoin-project/specs-actors/actors/crypto"
|
crypto "github.com/filecoin-project/specs-actors/actors/crypto"
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
exitcode "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
|
||||||
"github.com/ipfs/go-cid"
|
cid "github.com/ipfs/go-cid"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
xerrors "golang.org/x/xerrors"
|
xerrors "golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
@ -637,16 +637,11 @@ func (t *Message) MarshalCBOR(w io.Writer) error {
|
|||||||
|
|
||||||
scratch := make([]byte, 9)
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
// t.Version (int64) (int64)
|
// t.Version (uint64) (uint64)
|
||||||
if t.Version >= 0 {
|
|
||||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Version-1)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// t.To (address.Address) (struct)
|
// t.To (address.Address) (struct)
|
||||||
if err := t.To.MarshalCBOR(w); err != nil {
|
if err := t.To.MarshalCBOR(w); err != nil {
|
||||||
@ -729,30 +724,19 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error {
|
|||||||
return fmt.Errorf("cbor input had wrong number of fields")
|
return fmt.Errorf("cbor input had wrong number of fields")
|
||||||
}
|
}
|
||||||
|
|
||||||
// t.Version (int64) (int64)
|
// t.Version (uint64) (uint64)
|
||||||
|
|
||||||
{
|
{
|
||||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
|
||||||
var extraI int64
|
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
switch maj {
|
if maj != cbg.MajUnsignedInt {
|
||||||
case cbg.MajUnsignedInt:
|
return fmt.Errorf("wrong type for uint64 field")
|
||||||
extraI = int64(extra)
|
|
||||||
if extraI < 0 {
|
|
||||||
return fmt.Errorf("int64 positive overflow")
|
|
||||||
}
|
|
||||||
case cbg.MajNegativeInt:
|
|
||||||
extraI = int64(extra)
|
|
||||||
if extraI < 0 {
|
|
||||||
return fmt.Errorf("int64 negative oveflow")
|
|
||||||
}
|
|
||||||
extraI = -1 - extraI
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
|
||||||
}
|
}
|
||||||
|
t.Version = uint64(extra)
|
||||||
|
|
||||||
t.Version = int64(extraI)
|
|
||||||
}
|
}
|
||||||
// t.To (address.Address) (struct)
|
// t.To (address.Address) (struct)
|
||||||
|
|
||||||
|
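Note: changing Message.Version from int64 to uint64 lets cbor-gen drop the negative-integer branch entirely; the field is always encoded with the CBOR unsigned-int major type, as in this small sketch (WriteMajorTypeHeaderBuf is the same cbor-gen call the generated code uses):

```go
package main

import (
	"bytes"
	"fmt"

	cbg "github.com/whyrusleeping/cbor-gen"
)

func main() {
	var buf bytes.Buffer
	scratch := make([]byte, 9)

	version := uint64(0) // message version is currently always 0
	if err := cbg.WriteMajorTypeHeaderBuf(scratch, &buf, cbg.MajUnsignedInt, version); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // a single byte: small uints encode inline
}
```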
@ -25,7 +25,7 @@ type ChainMsg interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Message struct {
|
type Message struct {
|
||||||
Version int64
|
Version uint64
|
||||||
|
|
||||||
To address.Address
|
To address.Address
|
||||||
From address.Address
|
From address.Address
|
||||||
|
@ -62,8 +62,8 @@ func (sm *SignedMessage) Serialize() ([]byte, error) {
|
|||||||
return buf.Bytes(), nil
|
return buf.Bytes(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *SignedMessage) ChainLength() int {
|
func (sm *SignedMessage) ChainLength() int {
|
||||||
ser, err := m.Serialize()
|
ser, err := sm.Serialize()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
@ -238,3 +238,7 @@ func (ts *TipSet) IsChildOf(parent *TipSet) bool {
|
|||||||
// height for their processing logic at the moment to obviate it.
|
// height for their processing logic at the moment to obviate it.
|
||||||
ts.height > parent.height
|
ts.height > parent.height
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ts *TipSet) String() string {
|
||||||
|
return fmt.Sprintf("%v", ts.cids)
|
||||||
|
}
|
||||||
|
@ -2,6 +2,7 @@ package validation
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/state"
|
"github.com/filecoin-project/lotus/chain/state"
|
||||||
"github.com/filecoin-project/specs-actors/actors/runtime"
|
"github.com/filecoin-project/specs-actors/actors/runtime"
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
|
@ -2,9 +2,10 @@ package validation
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/minio/blake2b-simd"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
|
||||||
|
"github.com/minio/blake2b-simd"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-crypto"
|
"github.com/filecoin-project/go-crypto"
|
||||||
acrypto "github.com/filecoin-project/specs-actors/actors/crypto"
|
acrypto "github.com/filecoin-project/specs-actors/actors/crypto"
|
||||||
@ -69,7 +70,7 @@ func (k *KeyManager) Sign(addr address.Address, data []byte) (acrypto.Signature,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (k *KeyManager) newSecp256k1Key() *wallet.Key {
|
func (k *KeyManager) newSecp256k1Key() *wallet.Key {
|
||||||
randSrc := rand.New(rand.NewSource(k.secpSeed))
|
randSrc := rand.New(rand.NewSource(k.secpSeed)) // nolint
|
||||||
prv, err := crypto.GenerateKeyFromSeed(randSrc)
|
prv, err := crypto.GenerateKeyFromSeed(randSrc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@ -18,7 +18,7 @@ func LoadVector(t *testing.T, f string, out interface{}) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer fi.Close()
|
defer fi.Close() //nolint:errcheck
|
||||||
|
|
||||||
if err := json.NewDecoder(fi).Decode(out); err != nil {
|
if err := json.NewDecoder(fi).Decode(out); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
|
@ -34,7 +34,6 @@ type Runtime struct {
|
|||||||
|
|
||||||
vm *VM
|
vm *VM
|
||||||
state *state.StateTree
|
state *state.StateTree
|
||||||
msg *types.Message
|
|
||||||
vmsg vmr.Message
|
vmsg vmr.Message
|
||||||
height abi.ChainEpoch
|
height abi.ChainEpoch
|
||||||
cst cbor.IpldStore
|
cst cbor.IpldStore
|
||||||
@ -410,8 +409,10 @@ type shimStateHandle struct {
|
|||||||
|
|
||||||
func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
|
func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
|
||||||
c := ssh.rt.Put(obj)
|
c := ssh.rt.Put(obj)
|
||||||
// TODO: handle error below
|
err := ssh.rt.stateCommit(EmptyObjectCid, c)
|
||||||
ssh.rt.stateCommit(EmptyObjectCid, c)
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("failed to commit state after creating object: %w", err))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
|
func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) {
|
||||||
@ -440,8 +441,10 @@ func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func()) {
|
|||||||
|
|
||||||
c := ssh.rt.Put(obj)
|
c := ssh.rt.Put(obj)
|
||||||
|
|
||||||
// TODO: handle error below
|
err = ssh.rt.stateCommit(baseState, c)
|
||||||
ssh.rt.stateCommit(baseState, c)
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("failed to commit state after transaction: %w", err))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) {
|
func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) {
|
||||||
|
@ -97,7 +97,6 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin addres
|
|||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
vm: vm,
|
vm: vm,
|
||||||
state: vm.cstate,
|
state: vm.cstate,
|
||||||
msg: msg,
|
|
||||||
origin: origin,
|
origin: origin,
|
||||||
originNonce: originNonce,
|
originNonce: originNonce,
|
||||||
height: vm.blockHeight,
|
height: vm.blockHeight,
|
||||||
@ -254,6 +253,9 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
|||||||
if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil {
|
if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil {
|
||||||
return nil, aerrors.Wrap(aerr, "not enough gas for method invocation")
|
return nil, aerrors.Wrap(aerr, "not enough gas for method invocation")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// not charging any gas, just logging
|
||||||
|
//nolint:errcheck
|
||||||
defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0))
|
defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0))
|
||||||
|
|
||||||
if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {
|
if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {
|
||||||
|
@ -2,6 +2,7 @@ package cli
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
15  cli/chain.go
@ -319,7 +319,7 @@ var chainSetHeadCmd = &cli.Command{
|
|||||||
ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK)
|
ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK)
|
||||||
}
|
}
|
||||||
if ts == nil {
|
if ts == nil {
|
||||||
ts, err = parseTipSet(api, ctx, cctx.Args().Slice())
|
ts, err = parseTipSet(ctx, api, cctx.Args().Slice())
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -337,7 +337,7 @@ var chainSetHeadCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseTipSet(api api.FullNode, ctx context.Context, vals []string) (*types.TipSet, error) {
|
func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
|
||||||
var headers []*types.BlockHeader
|
var headers []*types.BlockHeader
|
||||||
for _, c := range vals {
|
for _, c := range vals {
|
||||||
blkc, err := cid.Decode(c)
|
blkc, err := cid.Decode(c)
|
||||||
@ -859,6 +859,10 @@ var chainExportCmd = &cli.Command{
|
|||||||
&cli.StringFlag{
|
&cli.StringFlag{
|
||||||
Name: "tipset",
|
Name: "tipset",
|
||||||
},
|
},
|
||||||
|
&cli.Int64Flag{
|
||||||
|
Name: "recent-stateroots",
|
||||||
|
Usage: "specify the number of recent state roots to include in the export",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
api, closer, err := GetFullNodeAPI(cctx)
|
api, closer, err := GetFullNodeAPI(cctx)
|
||||||
@ -872,6 +876,11 @@ var chainExportCmd = &cli.Command{
|
|||||||
return fmt.Errorf("must specify filename to export chain to")
|
return fmt.Errorf("must specify filename to export chain to")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rsrs := abi.ChainEpoch(cctx.Int64("recent-stateroots"))
|
||||||
|
if cctx.IsSet("recent-stateroots") && rsrs < build.Finality {
|
||||||
|
return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality)
|
||||||
|
}
|
||||||
|
|
||||||
fi, err := os.Create(cctx.Args().First())
|
fi, err := os.Create(cctx.Args().First())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -888,7 +897,7 @@ var chainExportCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
stream, err := api.ChainExport(ctx, ts.Key())
|
stream, err := api.ChainExport(ctx, rsrs, ts.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
131  cli/client.go
@ -1,6 +1,7 @@
|
|||||||
package cli
|
package cli
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@ -930,11 +931,11 @@ var clientQueryAskCmd = &cli.Command{
|
|||||||
return xerrors.Errorf("failed to get peerID for miner: %w", err)
|
return xerrors.Errorf("failed to get peerID for miner: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if peer.ID(*mi.PeerId) == peer.ID("SETME") {
|
if *mi.PeerId == peer.ID("SETME") {
|
||||||
return fmt.Errorf("the miner hasn't initialized yet")
|
return fmt.Errorf("the miner hasn't initialized yet")
|
||||||
}
|
}
|
||||||
|
|
||||||
pid = peer.ID(*mi.PeerId)
|
pid = *mi.PeerId
|
||||||
}
|
}
|
||||||
|
|
||||||
ask, err := api.ClientQueryAsk(ctx, pid, maddr)
|
ask, err := api.ClientQueryAsk(ctx, pid, maddr)
|
||||||
@ -978,6 +979,10 @@ var clientListDeals = &cli.Command{
|
|||||||
Usage: "use color in display output",
|
Usage: "use color in display output",
|
||||||
Value: true,
|
Value: true,
|
||||||
},
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "watch",
|
||||||
|
Usage: "watch deal updates in real-time, rather than a one time list",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
api, closer, err := GetFullNodeAPI(cctx)
|
api, closer, err := GetFullNodeAPI(cctx)
|
||||||
@ -987,48 +992,95 @@ var clientListDeals = &cli.Command{
|
|||||||
defer closer()
|
defer closer()
|
||||||
ctx := ReqContext(cctx)
|
ctx := ReqContext(cctx)
|
||||||
|
|
||||||
head, err := api.ChainHead(ctx)
|
verbose := cctx.Bool("verbose")
|
||||||
if err != nil {
|
color := cctx.Bool("color")
|
||||||
return err
|
watch := cctx.Bool("watch")
|
||||||
}
|
|
||||||
|
|
||||||
localDeals, err := api.ClientListDeals(ctx)
|
localDeals, err := api.ClientListDeals(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sort.Slice(localDeals, func(i, j int) bool {
|
if watch {
|
||||||
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
|
updates, err := api.ClientGetDealUpdates(ctx)
|
||||||
})
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
var deals []deal
|
for {
|
||||||
for _, v := range localDeals {
|
tm.Clear()
|
||||||
|
tm.MoveCursor(1, 1)
|
||||||
|
|
||||||
|
err = outputStorageDeals(ctx, tm.Screen, api, localDeals, verbose, color)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tm.Flush()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil
|
||||||
|
case updated := <-updates:
|
||||||
|
var found bool
|
||||||
|
for i, existing := range localDeals {
|
||||||
|
if existing.ProposalCid.Equals(updated.ProposalCid) {
|
||||||
|
localDeals[i] = updated
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
localDeals = append(localDeals, updated)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return outputStorageDeals(ctx, os.Stdout, api, localDeals, cctx.Bool("verbose"), cctx.Bool("color"))
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func dealFromDealInfo(ctx context.Context, full api.FullNode, head *types.TipSet, v api.DealInfo) deal {
|
||||||
if v.DealID == 0 {
|
if v.DealID == 0 {
|
||||||
deals = append(deals, deal{
|
return deal{
|
||||||
LocalDeal: v,
|
LocalDeal: v,
|
||||||
OnChainDealState: market.DealState{
|
OnChainDealState: market.DealState{
|
||||||
SectorStartEpoch: -1,
|
SectorStartEpoch: -1,
|
||||||
LastUpdatedEpoch: -1,
|
LastUpdatedEpoch: -1,
|
||||||
SlashEpoch: -1,
|
SlashEpoch: -1,
|
||||||
},
|
},
|
||||||
})
|
}
|
||||||
} else {
|
}
|
||||||
onChain, err := api.StateMarketStorageDeal(ctx, v.DealID, head.Key())
|
|
||||||
|
onChain, err := full.StateMarketStorageDeal(ctx, v.DealID, head.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
deals = append(deals, deal{LocalDeal: v})
|
return deal{LocalDeal: v}
|
||||||
} else {
|
}
|
||||||
deals = append(deals, deal{
|
|
||||||
|
return deal{
|
||||||
LocalDeal: v,
|
LocalDeal: v,
|
||||||
OnChainDealState: onChain.State,
|
OnChainDealState: onChain.State,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func outputStorageDeals(ctx context.Context, out io.Writer, full api.FullNode, localDeals []api.DealInfo, verbose bool, color bool) error {
|
||||||
|
sort.Slice(localDeals, func(i, j int) bool {
|
||||||
|
return localDeals[i].CreationTime.Before(localDeals[j].CreationTime)
|
||||||
})
|
})
|
||||||
}
|
|
||||||
}
|
head, err := full.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
color := cctx.Bool("color")
|
var deals []deal
|
||||||
|
for _, localDeal := range localDeals {
|
||||||
|
deals = append(deals, dealFromDealInfo(ctx, full, head, localDeal))
|
||||||
|
}
|
||||||
|
|
||||||
if cctx.Bool("verbose") {
|
if verbose {
|
||||||
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
|
w := tabwriter.NewWriter(out, 2, 4, 2, ' ', 0)
|
||||||
fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
|
fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tMessage\n")
|
||||||
for _, d := range deals {
|
for _, d := range deals {
|
||||||
onChain := "N"
|
onChain := "N"
|
||||||
@ -1045,7 +1097,8 @@ var clientListDeals = &cli.Command{
|
|||||||
fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
|
fmt.Fprintf(w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", d.LocalDeal.CreationTime.Format(time.Stamp), d.LocalDeal.ProposalCid, d.LocalDeal.DealID, d.LocalDeal.Provider, dealStateString(color, d.LocalDeal.State), onChain, slashed, d.LocalDeal.PieceCID, types.SizeStr(types.NewInt(d.LocalDeal.Size)), price, d.LocalDeal.Duration, d.LocalDeal.Message)
|
||||||
}
|
}
|
||||||
return w.Flush()
|
return w.Flush()
|
||||||
} else {
|
}
|
||||||
|
|
||||||
w := tablewriter.New(tablewriter.Col("DealCid"),
|
w := tablewriter.New(tablewriter.Col("DealCid"),
|
||||||
tablewriter.Col("DealId"),
|
tablewriter.Col("DealId"),
|
||||||
tablewriter.Col("Provider"),
|
tablewriter.Col("Provider"),
|
||||||
@ -1059,8 +1112,7 @@ var clientListDeals = &cli.Command{
|
|||||||
tablewriter.NewLineCol("Message"))
|
tablewriter.NewLineCol("Message"))
|
||||||
|
|
||||||
for _, d := range deals {
|
for _, d := range deals {
|
||||||
propcid := d.LocalDeal.ProposalCid.String()
|
propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8)
|
||||||
propcid = "..." + propcid[len(propcid)-8:]
|
|
||||||
|
|
||||||
onChain := "N"
|
onChain := "N"
|
||||||
if d.OnChainDealState.SectorStartEpoch != -1 {
|
if d.OnChainDealState.SectorStartEpoch != -1 {
|
||||||
@ -1072,8 +1124,7 @@ var clientListDeals = &cli.Command{
|
|||||||
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
|
slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch)
|
||||||
}
|
}
|
||||||
|
|
||||||
piece := d.LocalDeal.PieceCID.String()
|
piece := ellipsis(d.LocalDeal.PieceCID.String(), 8)
|
||||||
piece = "..." + piece[len(piece)-8:]
|
|
||||||
|
|
||||||
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
|
price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration)))
|
||||||
|
|
||||||
@ -1092,9 +1143,7 @@ var clientListDeals = &cli.Command{
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return w.Flush(os.Stdout)
|
return w.Flush(out)
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func dealStateString(c bool, state storagemarket.StorageDealStatus) string {
|
func dealStateString(c bool, state storagemarket.StorageDealStatus) string {
|
||||||
@ -1318,7 +1367,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
|||||||
for _, channel := range sendingChannels {
|
for _, channel := range sendingChannels {
|
||||||
w.Write(toChannelOutput(color, "Sending To", channel))
|
w.Write(toChannelOutput(color, "Sending To", channel))
|
||||||
}
|
}
|
||||||
w.Flush(out)
|
w.Flush(out) //nolint:errcheck
|
||||||
|
|
||||||
fmt.Fprintf(out, "\nReceiving Channels\n\n")
|
fmt.Fprintf(out, "\nReceiving Channels\n\n")
|
||||||
w = tablewriter.New(tablewriter.Col("ID"),
|
w = tablewriter.New(tablewriter.Col("ID"),
|
||||||
@ -1332,7 +1381,7 @@ func OutputDataTransferChannels(out io.Writer, channels []lapi.DataTransferChann
|
|||||||
for _, channel := range receivingChannels {
|
for _, channel := range receivingChannels {
|
||||||
w.Write(toChannelOutput(color, "Receiving From", channel))
|
w.Write(toChannelOutput(color, "Receiving From", channel))
|
||||||
}
|
}
|
||||||
w.Flush(out)
|
w.Flush(out) //nolint:errcheck
|
||||||
}
|
}
|
||||||
|
|
||||||
func channelStatusString(useColor bool, status datatransfer.Status) string {
|
func channelStatusString(useColor bool, status datatransfer.Status) string {
|
||||||
@ -1352,11 +1401,8 @@ func channelStatusString(useColor bool, status datatransfer.Status) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
|
func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTransferChannel) map[string]interface{} {
|
||||||
rootCid := channel.BaseCID.String()
|
rootCid := ellipsis(channel.BaseCID.String(), 8)
|
||||||
rootCid = "..." + rootCid[len(rootCid)-8:]
|
otherParty := ellipsis(channel.OtherPeer.String(), 8)
|
||||||
|
|
||||||
otherParty := channel.OtherPeer.String()
|
|
||||||
otherParty = "..." + otherParty[len(otherParty)-8:]
|
|
||||||
|
|
||||||
initiated := "N"
|
initiated := "N"
|
||||||
if channel.IsInitiator {
|
if channel.IsInitiator {
|
||||||
@ -1365,7 +1411,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
|
|||||||
|
|
||||||
voucher := channel.Voucher
|
voucher := channel.Voucher
|
||||||
if len(voucher) > 40 {
|
if len(voucher) > 40 {
|
||||||
voucher = "..." + voucher[len(voucher)-37:]
|
voucher = ellipsis(voucher, 37)
|
||||||
}
|
}
|
||||||
|
|
||||||
return map[string]interface{}{
|
return map[string]interface{}{
|
||||||
@@ -1379,3 +1425,10 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
 		"Message":        channel.Message,
 	}
 }
+
+func ellipsis(s string, length int) string {
+	if length > 0 && len(s) > length {
+		return "..." + s[len(s)-length:]
+	}
+	return s
+}
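Note: the ellipsis helper above replaces the repeated `"..." + s[len(s)-8:]` truncation for proposal CIDs, piece CIDs, peers and vouchers. A tiny usage sketch (the long string is arbitrary):

```go
package main

import "fmt"

// ellipsis keeps only the trailing `length` characters of long strings,
// prefixed with "..."; short strings pass through unchanged.
func ellipsis(s string, length int) string {
	if length > 0 && len(s) > length {
		return "..." + s[len(s)-length:]
	}
	return s
}

func main() {
	fmt.Println(ellipsis("bafy2bzaceclientdealproposalcid", 8)) // "..." plus last 8 chars
	fmt.Println(ellipsis("short", 8))                           // "short"
}
```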
|
25  cli/cmd.go
@ -12,7 +12,7 @@ import (
|
|||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/mitchellh/go-homedir"
|
"github.com/mitchellh/go-homedir"
|
||||||
"github.com/multiformats/go-multiaddr"
|
"github.com/multiformats/go-multiaddr"
|
||||||
manet "github.com/multiformats/go-multiaddr-net"
|
manet "github.com/multiformats/go-multiaddr/net"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
@ -75,6 +75,8 @@ func flagForAPI(t repo.RepoType) string {
|
|||||||
return "api"
|
return "api"
|
||||||
case repo.StorageMiner:
|
case repo.StorageMiner:
|
||||||
return "miner-api"
|
return "miner-api"
|
||||||
|
case repo.Worker:
|
||||||
|
return "worker-api"
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||||
}
|
}
|
||||||
@ -86,6 +88,8 @@ func flagForRepo(t repo.RepoType) string {
|
|||||||
return "repo"
|
return "repo"
|
||||||
case repo.StorageMiner:
|
case repo.StorageMiner:
|
||||||
return "miner-repo"
|
return "miner-repo"
|
||||||
|
case repo.Worker:
|
||||||
|
return "worker-repo"
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||||
}
|
}
|
||||||
@ -97,6 +101,8 @@ func envForRepo(t repo.RepoType) string {
|
|||||||
return "FULLNODE_API_INFO"
|
return "FULLNODE_API_INFO"
|
||||||
case repo.StorageMiner:
|
case repo.StorageMiner:
|
||||||
return "MINER_API_INFO"
|
return "MINER_API_INFO"
|
||||||
|
case repo.Worker:
|
||||||
|
return "WORKER_API_INFO"
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||||
}
|
}
|
||||||
@ -109,6 +115,8 @@ func envForRepoDeprecation(t repo.RepoType) string {
|
|||||||
return "FULLNODE_API_INFO"
|
return "FULLNODE_API_INFO"
|
||||||
case repo.StorageMiner:
|
case repo.StorageMiner:
|
||||||
return "STORAGE_API_INFO"
|
return "STORAGE_API_INFO"
|
||||||
|
case repo.Worker:
|
||||||
|
return "WORKER_API_INFO"
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||||
}
|
}
|
||||||
@ -213,7 +221,7 @@ func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return client.NewCommonRPC(addr, headers)
|
return client.NewCommonRPC(ctx.Context, addr, headers)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) {
|
func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||||
@ -222,7 +230,7 @@ func GetFullNodeAPI(ctx *cli.Context) (api.FullNode, jsonrpc.ClientCloser, error
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return client.NewFullNodeRPC(addr, headers)
|
return client.NewFullNodeRPC(ctx.Context, addr, headers)
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
|
func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
|
||||||
@ -231,7 +239,16 @@ func GetStorageMinerAPI(ctx *cli.Context, opts ...jsonrpc.Option) (api.StorageMi
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return client.NewStorageMinerRPC(addr, headers, opts...)
|
return client.NewStorageMinerRPC(ctx.Context, addr, headers, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetWorkerAPI(ctx *cli.Context) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
|
||||||
|
addr, headers, err := GetRawAPI(ctx, repo.Worker)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.NewWorkerRPC(ctx.Context, addr, headers)
|
||||||
}
|
}
|
||||||
|
|
||||||
func DaemonContext(cctx *cli.Context) context.Context {
|
func DaemonContext(cctx *cli.Context) context.Context {
|
||||||
|
@ -39,7 +39,7 @@ func RunApp(app *cli.App) {
 		}
 		var phe *PrintHelpErr
 		if xerrors.As(err, &phe) {
-			cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
+			_ = cli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
 		}
 		os.Exit(1)
 	}
@ -20,7 +20,7 @@ var logList = &cli.Command{
 	Name:  "list",
 	Usage: "List log systems",
 	Action: func(cctx *cli.Context) error {
-		api, closer, err := GetFullNodeAPI(cctx)
+		api, closer, err := GetAPI(cctx)
 		if err != nil {
 			return err
 		}
@ -60,7 +60,8 @@ var logSetLevel = &cli.Command{
 Environment Variables:
 GOLOG_LOG_LEVEL - Default log level for all log systems
 GOLOG_LOG_FMT - Change output log format (json, nocolor)
-GOLOG_FILE - Write logs to file in addition to stderr
+GOLOG_FILE - Write logs to file
+GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr
 `,
 	Flags: []cli.Flag{
 		&cli.StringSliceFlag{
@ -70,7 +71,7 @@ var logSetLevel = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
-		api, closer, err := GetFullNodeAPI(cctx)
+		api, closer, err := GetAPI(cctx)
 		if err != nil {
 			return err
 		}
36  cli/mpool.go
@ -20,6 +20,7 @@ var mpoolCmd = &cli.Command{
 	Usage: "Manage message pool",
 	Subcommands: []*cli.Command{
 		mpoolPending,
+		mpoolClear,
 		mpoolSub,
 		mpoolStat,
 		mpoolReplaceCmd,
@ -83,6 +84,39 @@ var mpoolPending = &cli.Command{
 	},
 }
 
+var mpoolClear = &cli.Command{
+	Name:  "clear",
+	Usage: "Clear all pending messages from the mpool (USE WITH CARE)",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "local",
+			Usage: "also clear local messages",
+		},
+		&cli.BoolFlag{
+			Name:  "really-do-it",
+			Usage: "must be specified for the action to take effect",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		really := cctx.Bool("really-do-it")
+		if !really {
+			//nolint:golint
+			return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
+		}
+
+		local := cctx.Bool("local")
+
+		ctx := ReqContext(cctx)
+		return api.MpoolClear(ctx, local)
+	},
+}
+
 var mpoolSub = &cli.Command{
 	Name:  "sub",
 	Usage: "Subscribe to mpool changes",
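As a quick illustration, the new clear command can be driven through the package's mock CLI harness introduced for the payment channel tests later in this change. This is only an illustrative sketch under that assumption, not a test added by the commit.

// Illustrative sketch only: exercises mpoolClear through the mockCLI runner
// used elsewhere in this package; the node setup is assumed to already exist.
func clearPool(t *testing.T, node test.TestNode) {
	mockCLI := newMockCLI(t)
	nodeCLI := mockCLI.client(node.ListenAddr)

	// "--really-do-it" is required, otherwise the Action returns an error.
	nodeCLI.runCmd(mpoolClear, []string{"--really-do-it", "--local"})
}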
@ -313,7 +347,7 @@ var mpoolReplaceCmd = &cli.Command{
 		if err != nil {
 			return fmt.Errorf("parsing gas-premium: %w", err)
 		}
-		// TODO: estiamte fee cap here
+		// TODO: estimate fee cap here
 		msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
 		if err != nil {
 			return fmt.Errorf("parsing gas-feecap: %w", err)
@ -180,7 +180,7 @@ var netFindPeer = &cli.Command{
 			return nil
 		}
 
-		pid, err := peer.IDB58Decode(cctx.Args().First())
+		pid, err := peer.Decode(cctx.Args().First())
 		if err != nil {
 			return err
 		}
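The move from the deprecated IDB58Decode to peer.Decode is mechanical; a minimal standalone sketch of the replacement call looks like this (the peer ID string is a truncated placeholder, not a real ID).

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	// Placeholder string for illustration; peer.Decode accepts base58 or
	// CID-encoded peer IDs.
	pid, err := peer.Decode("12D3KooW...")
	if err != nil {
		fmt.Println("invalid peer ID:", err)
		return
	}
	fmt.Println("decoded peer:", pid)
}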
83  cli/paych.go
@ -4,6 +4,10 @@ import (
 	"bytes"
 	"encoding/base64"
 	"fmt"
+	"io"
+	"sort"
+
+	"github.com/filecoin-project/lotus/paychmgr"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/lotus/build"
@ -322,7 +326,7 @@ var paychVoucherListCmd = &cli.Command{
 	Flags: []cli.Flag{
 		&cli.BoolFlag{
 			Name:  "export",
-			Usage: "Print export strings",
+			Usage: "Print voucher as serialized string",
 		},
 	},
 	Action: func(cctx *cli.Context) error {
@ -348,17 +352,12 @@ var paychVoucherListCmd = &cli.Command{
 			return err
 		}
 
-		for _, v := range vouchers {
-			if cctx.Bool("export") {
-				enc, err := EncodedString(v)
-				if err != nil {
-					return err
-				}
-
-				fmt.Fprintf(cctx.App.Writer, "Lane %d, Nonce %d: %s; %s\n", v.Lane, v.Nonce, v.Amount.String(), enc)
-			} else {
-				fmt.Fprintf(cctx.App.Writer, "Lane %d, Nonce %d: %s\n", v.Lane, v.Nonce, v.Amount.String())
-			}
+		for _, v := range sortVouchers(vouchers) {
+			export := cctx.Bool("export")
+			err := outputVoucher(cctx.App.Writer, v, export)
+			if err != nil {
+				return err
+			}
 		}
 
 		return nil
@ -367,8 +366,14 @@ var paychVoucherListCmd = &cli.Command{
 
 var paychVoucherBestSpendableCmd = &cli.Command{
 	Name:      "best-spendable",
-	Usage:     "Print voucher with highest value that is currently spendable",
+	Usage:     "Print vouchers with highest value that is currently spendable for each lane",
 	ArgsUsage: "[channelAddress]",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "export",
+			Usage: "Print voucher as serialized string",
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		if cctx.Args().Len() != 1 {
 			return ShowHelp(cctx, fmt.Errorf("must pass payment channel address"))
@ -387,39 +392,55 @@ var paychVoucherBestSpendableCmd = &cli.Command{
 
 		ctx := ReqContext(cctx)
 
-		vouchers, err := api.PaychVoucherList(ctx, ch)
+		vouchersByLane, err := paychmgr.BestSpendableByLane(ctx, api, ch)
 		if err != nil {
 			return err
 		}
 
-		var best *paych.SignedVoucher
-		for _, v := range vouchers {
-			spendable, err := api.PaychVoucherCheckSpendable(ctx, ch, v, nil, nil)
+		var vouchers []*paych.SignedVoucher
+		for _, vchr := range vouchersByLane {
+			vouchers = append(vouchers, vchr)
+		}
+		for _, best := range sortVouchers(vouchers) {
+			export := cctx.Bool("export")
+			err := outputVoucher(cctx.App.Writer, best, export)
 			if err != nil {
 				return err
 			}
-			if spendable {
-				if best == nil || v.Amount.GreaterThan(best.Amount) {
-					best = v
-				}
-			}
 		}
 
-		if best == nil {
-			return fmt.Errorf("No spendable vouchers for that channel")
-		}
-
-		enc, err := EncodedString(best)
-		if err != nil {
-			return err
-		}
-
-		fmt.Fprintln(cctx.App.Writer, enc)
-		fmt.Fprintf(cctx.App.Writer, "Amount: %s\n", best.Amount)
 		return nil
 	},
 }
 
+func sortVouchers(vouchers []*paych.SignedVoucher) []*paych.SignedVoucher {
+	sort.Slice(vouchers, func(i, j int) bool {
+		if vouchers[i].Lane == vouchers[j].Lane {
+			return vouchers[i].Nonce < vouchers[j].Nonce
+		}
+		return vouchers[i].Lane < vouchers[j].Lane
+	})
+	return vouchers
+}
+
+func outputVoucher(w io.Writer, v *paych.SignedVoucher, export bool) error {
+	var enc string
+	if export {
+		var err error
+		enc, err = EncodedString(v)
+		if err != nil {
+			return err
+		}
+	}
+
+	fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, v.Amount.String())
+	if export {
+		fmt.Fprintf(w, "; %s", enc)
+	}
+	fmt.Fprintln(w)
+	return nil
+}
+
 var paychVoucherSubmitCmd = &cli.Command{
 	Name:  "submit",
 	Usage: "Submit voucher to chain to update payment channel state",
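The lane-then-nonce ordering used by sortVouchers is worth spelling out. Here is a small self-contained sketch of the same comparator over a simplified voucher type; the struct is illustrative, not the real paych.SignedVoucher.

package main

import (
	"fmt"
	"sort"
)

// voucher is a stand-in for paych.SignedVoucher with just the fields the
// comparator looks at.
type voucher struct {
	Lane  uint64
	Nonce uint64
}

func main() {
	vs := []voucher{{Lane: 5, Nonce: 2}, {Lane: 0, Nonce: 1}, {Lane: 5, Nonce: 1}}

	// Same ordering as sortVouchers: ascending by lane, then by nonce within a lane.
	sort.Slice(vs, func(i, j int) bool {
		if vs[i].Lane == vs[j].Lane {
			return vs[i].Nonce < vs[j].Nonce
		}
		return vs[i].Lane < vs[j].Lane
	})

	fmt.Println(vs) // [{0 1} {5 1} {5 2}]
}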
@ -447,7 +468,7 @@ var paychVoucherSubmitCmd = &cli.Command{
 
 		ctx := ReqContext(cctx)
 
-		mcid, err := api.PaychVoucherSubmit(ctx, ch, sv)
+		mcid, err := api.PaychVoucherSubmit(ctx, ch, sv, nil, nil)
 		if err != nil {
 			return err
 		}
@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"flag"
+	"fmt"
 	"os"
 	"strconv"
 	"strings"
@ -49,6 +50,227 @@ func TestPaymentChannels(t *testing.T) {
 
 	blocktime := 5 * time.Millisecond
 	ctx := context.Background()
+	nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
+	paymentCreator := nodes[0]
+	paymentReceiver := nodes[0]
+	creatorAddr := addrs[0]
+	receiverAddr := addrs[1]
+
+	// Create mock CLI
+	mockCLI := newMockCLI(t)
+	creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
+	receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
+
+	// creator: paych get <creator> <receiver> <amount>
+	channelAmt := "100000"
+	cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
+	chstr := creatorCLI.runCmd(paychGetCmd, cmd)
+
+	chAddr, err := address.NewFromString(chstr)
+	require.NoError(t, err)
+
+	// creator: paych voucher create <channel> <amount>
+	voucherAmt := 100
+	vamt := strconv.Itoa(voucherAmt)
+	cmd = []string{chAddr.String(), vamt}
+	voucher := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
+
+	// receiver: paych voucher add <channel> <voucher>
+	cmd = []string{chAddr.String(), voucher}
+	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
+
+	// creator: paych settle <channel>
+	cmd = []string{chAddr.String()}
+	creatorCLI.runCmd(paychSettleCmd, cmd)
+
+	// Wait for the chain to reach the settle height
+	chState := getPaychState(ctx, t, paymentReceiver, chAddr)
+	waitForHeight(ctx, t, paymentReceiver, chState.SettlingAt)
+
+	// receiver: paych collect <channel>
+	cmd = []string{chAddr.String()}
+	receiverCLI.runCmd(paychCloseCmd, cmd)
+}
+
+type voucherSpec struct {
+	serialized string
+	amt        int
+	lane       int
+}
+
+// TestPaymentChannelVouchers does a basic test to exercise some payment
+// channel voucher commands
+func TestPaymentChannelVouchers(t *testing.T) {
+	_ = os.Setenv("BELLMAN_NO_GPU", "1")
+
+	blocktime := 5 * time.Millisecond
+	ctx := context.Background()
+	nodes, addrs := startTwoNodesOneMiner(ctx, t, blocktime)
+	paymentCreator := nodes[0]
+	paymentReceiver := nodes[1]
+	creatorAddr := addrs[0]
+	receiverAddr := addrs[1]
+
+	// Create mock CLI
+	mockCLI := newMockCLI(t)
+	creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
+	receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
+
+	// creator: paych get <creator> <receiver> <amount>
+	channelAmt := "100000"
+	cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
+	chstr := creatorCLI.runCmd(paychGetCmd, cmd)
+
+	chAddr, err := address.NewFromString(chstr)
+	require.NoError(t, err)
+
+	var vouchers []voucherSpec
+
+	// creator: paych voucher create <channel> <amount>
+	// Note: implied --lane=0
+	voucherAmt1 := 100
+	cmd = []string{chAddr.String(), strconv.Itoa(voucherAmt1)}
+	voucher1 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
+	vouchers = append(vouchers, voucherSpec{serialized: voucher1, lane: 0, amt: voucherAmt1})
+
+	// creator: paych voucher create <channel> <amount> --lane=5
+	lane5 := "--lane=5"
+	voucherAmt2 := 50
+	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt2)}
+	voucher2 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
+	vouchers = append(vouchers, voucherSpec{serialized: voucher2, lane: 5, amt: voucherAmt2})
+
+	// creator: paych voucher create <channel> <amount> --lane=5
+	voucherAmt3 := 70
+	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt3)}
+	voucher3 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
+	vouchers = append(vouchers, voucherSpec{serialized: voucher3, lane: 5, amt: voucherAmt3})
+
+	// creator: paych voucher create <channel> <amount> --lane=5
+	voucherAmt4 := 80
+	cmd = []string{lane5, chAddr.String(), strconv.Itoa(voucherAmt4)}
+	voucher4 := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
+	vouchers = append(vouchers, voucherSpec{serialized: voucher4, lane: 5, amt: voucherAmt4})
+
+	// creator: paych voucher list <channel> --export
+	cmd = []string{"--export", chAddr.String()}
+	list := creatorCLI.runCmd(paychVoucherListCmd, cmd)
+
+	// Check that voucher list output is correct on creator
+	checkVoucherOutput(t, list, vouchers)
+
+	// creator: paych voucher best-spendable <channel>
+	cmd = []string{"--export", chAddr.String()}
+	bestSpendable := creatorCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
+
+	// Check that best spendable output is correct on creator
+	bestVouchers := []voucherSpec{
+		{serialized: voucher1, lane: 0, amt: voucherAmt1},
+		{serialized: voucher4, lane: 5, amt: voucherAmt4},
+	}
+	checkVoucherOutput(t, bestSpendable, bestVouchers)
+
+	// receiver: paych voucher add <voucher>
+	cmd = []string{chAddr.String(), voucher1}
+	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
+
+	// receiver: paych voucher add <voucher>
+	cmd = []string{chAddr.String(), voucher2}
+	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
+
+	// receiver: paych voucher add <voucher>
+	cmd = []string{chAddr.String(), voucher3}
+	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
+
+	// receiver: paych voucher add <voucher>
+	cmd = []string{chAddr.String(), voucher4}
+	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
+
+	// receiver: paych voucher list <channel> --export
+	cmd = []string{"--export", chAddr.String()}
+	list = receiverCLI.runCmd(paychVoucherListCmd, cmd)
+
+	// Check that voucher list output is correct on receiver
+	checkVoucherOutput(t, list, vouchers)
+
+	// receiver: paych voucher best-spendable <channel>
+	cmd = []string{"--export", chAddr.String()}
+	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
+
+	// Check that best spendable output is correct on receiver
+	bestVouchers = []voucherSpec{
+		{serialized: voucher1, lane: 0, amt: voucherAmt1},
+		{serialized: voucher4, lane: 5, amt: voucherAmt4},
+	}
+	checkVoucherOutput(t, bestSpendable, bestVouchers)
+
+	// receiver: paych voucher submit <channel> <voucher>
+	cmd = []string{chAddr.String(), voucher1}
+	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
+
+	// receiver: paych voucher best-spendable <channel>
+	cmd = []string{"--export", chAddr.String()}
+	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
+
+	// Check that best spendable output no longer includes submitted voucher
+	bestVouchers = []voucherSpec{
+		{serialized: voucher4, lane: 5, amt: voucherAmt4},
+	}
+	checkVoucherOutput(t, bestSpendable, bestVouchers)
+
+	// There are three vouchers in lane 5: 50, 70, 80
+	// Submit the voucher for 50. Best spendable should still be 80.
+	// receiver: paych voucher submit <channel> <voucher>
+	cmd = []string{chAddr.String(), voucher2}
+	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
+
+	// receiver: paych voucher best-spendable <channel>
+	cmd = []string{"--export", chAddr.String()}
+	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
+
+	// Check that best spendable output still includes the voucher for 80
+	bestVouchers = []voucherSpec{
+		{serialized: voucher4, lane: 5, amt: voucherAmt4},
+	}
+	checkVoucherOutput(t, bestSpendable, bestVouchers)
+
+	// Submit the voucher for 80
+	// receiver: paych voucher submit <channel> <voucher>
+	cmd = []string{chAddr.String(), voucher4}
+	receiverCLI.runCmd(paychVoucherSubmitCmd, cmd)
+
+	// receiver: paych voucher best-spendable <channel>
+	cmd = []string{"--export", chAddr.String()}
+	bestSpendable = receiverCLI.runCmd(paychVoucherBestSpendableCmd, cmd)
+
+	// Check that best spendable output no longer includes submitted voucher
+	bestVouchers = []voucherSpec{}
+	checkVoucherOutput(t, bestSpendable, bestVouchers)
+}
+
+func checkVoucherOutput(t *testing.T, list string, vouchers []voucherSpec) {
+	lines := strings.Split(list, "\n")
+	listVouchers := make(map[string]string)
+	for _, line := range lines {
+		parts := strings.Split(line, ";")
+		if len(parts) == 2 {
+			serialized := strings.TrimSpace(parts[1])
+			listVouchers[serialized] = strings.TrimSpace(parts[0])
+		}
+	}
+	for _, vchr := range vouchers {
+		res, ok := listVouchers[vchr.serialized]
+		require.True(t, ok)
+		require.Regexp(t, fmt.Sprintf("Lane %d", vchr.lane), res)
+		require.Regexp(t, fmt.Sprintf("%d", vchr.amt), res)
+		delete(listVouchers, vchr.serialized)
+	}
+	for _, vchr := range listVouchers {
+		require.Fail(t, "Extra voucher "+vchr)
+	}
+}
+
+func startTwoNodesOneMiner(ctx context.Context, t *testing.T, blocktime time.Duration) ([]test.TestNode, []address.Address) {
 	n, sn := builder.RPCMockSbBuilder(t, 2, test.OneMiner)
 
 	paymentCreator := n[0]
@ -88,39 +310,7 @@ func TestPaymentChannels(t *testing.T) {
 	}
 
 	// Create mock CLI
-	mockCLI := newMockCLI(t)
-	creatorCLI := mockCLI.client(paymentCreator.ListenAddr)
-	receiverCLI := mockCLI.client(paymentReceiver.ListenAddr)
-
-	// creator: paych get <creator> <receiver> <amount>
-	channelAmt := "100000"
-	cmd := []string{creatorAddr.String(), receiverAddr.String(), channelAmt}
-	chstr := creatorCLI.runCmd(paychGetCmd, cmd)
-
-	chAddr, err := address.NewFromString(chstr)
-	require.NoError(t, err)
-
-	// creator: paych voucher create <channel> <amount>
-	voucherAmt := 100
-	vamt := strconv.Itoa(voucherAmt)
-	cmd = []string{chAddr.String(), vamt}
-	voucher := creatorCLI.runCmd(paychVoucherCreateCmd, cmd)
-
-	// receiver: paych voucher add <channel> <voucher>
-	cmd = []string{chAddr.String(), voucher}
-	receiverCLI.runCmd(paychVoucherAddCmd, cmd)
-
-	// creator: paych settle <channel>
-	cmd = []string{chAddr.String()}
-	creatorCLI.runCmd(paychSettleCmd, cmd)
-
-	// Wait for the chain to reach the settle height
-	chState := getPaychState(ctx, t, paymentReceiver, chAddr)
-	waitForHeight(ctx, t, paymentReceiver, chState.SettlingAt)
-
-	// receiver: paych collect <channel>
-	cmd = []string{chAddr.String()}
-	receiverCLI.runCmd(paychCloseCmd, cmd)
+	return n, []address.Address{creatorAddr, receiverAddr}
 }
 
 type mockCLI struct {
@ -9,7 +9,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/lotus/node/repo"
-	manet "github.com/multiformats/go-multiaddr-net"
+	manet "github.com/multiformats/go-multiaddr/net"
 )
 
 var pprofCmd = &cli.Command{
@ -44,7 +44,7 @@ var PprofGoroutines = &cli.Command{
 
 		addr = "http://" + addr + "/debug/pprof/goroutine?debug=2"
 
-		r, err := http.Get(addr)
+		r, err := http.Get(addr) //nolint:gosec
 		if err != nil {
 			return err
 		}
10  cli/state.go
@ -353,6 +353,9 @@ var stateReplaySetCmd = &cli.Command{
 			}
 
 			ts, err = types.NewTipSet(headers)
+			if err != nil {
+				return err
+			}
 		} else {
 			var r *api.MsgLookup
 			r, err = fapi.StateWaitMsg(ctx, mcid, build.MessageConfidence)
@ -365,10 +368,10 @@ var stateReplaySetCmd = &cli.Command{
 				return xerrors.Errorf("loading tipset: %w", err)
 			}
 			ts, err = fapi.ChainGetTipSet(ctx, childTs.Parents())
-		}
 			if err != nil {
 				return err
 			}
+		}
 
 		}
 
@ -1499,7 +1502,7 @@ func parseParamsForMethod(act cid.Cid, method uint64, args []string) ([]byte, er
 			}
 			p.Elem().Field(i).Set(reflect.ValueOf(val))
 		case reflect.TypeOf(peer.ID("")):
-			pid, err := peer.IDB58Decode(args[i])
+			pid, err := peer.Decode(args[i])
 			if err != nil {
 				return nil, fmt.Errorf("failed to parse peer ID: %s", err)
 			}
@ -1584,6 +1587,9 @@ var stateMarketBalanceCmd = &cli.Command{
 		}
 
 		balance, err := api.StateMarketBalance(ctx, addr, ts.Key())
+		if err != nil {
+			return err
+		}
 
 		fmt.Printf("Escrow: %s\n", types.FIL(balance.Escrow))
 		fmt.Printf("Locked: %s\n", types.FIL(balance.Locked))
@ -385,10 +385,9 @@ var walletVerify = &cli.Command{
 		if api.WalletVerify(ctx, addr, msg, &sig) {
 			fmt.Println("valid")
 			return nil
-		} else {
+		}
 		fmt.Println("invalid")
 		return NewCliError("CLI Verify called with invalid signature")
-		}
 	},
 }
 
@ -214,7 +214,7 @@ func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
 	}
 
 	for _, sub := range et.Subcalls {
-		c, v := countGasCosts(&sub)
+		c, v := countGasCosts(&sub) //nolint
 		cgas += c
 		vgas += v
 	}
@ -222,24 +222,6 @@ func countGasCosts(et *types.ExecutionTrace) (int64, int64) {
 	return cgas, vgas
 }
 
-func compStats(vals []float64) (float64, float64) {
-	var sum float64
-
-	for _, v := range vals {
-		sum += v
-	}
-
-	av := sum / float64(len(vals))
-
-	var varsum float64
-	for _, v := range vals {
-		delta := av - v
-		varsum += delta * delta
-	}
-
-	return av, math.Sqrt(varsum / float64(len(vals)))
-}
-
 type stats struct {
 	timeTaken meanVar
 	gasRatio  meanVar
@ -264,20 +246,20 @@ func (cov1 *covar) VarianceX() float64 {
 	return cov1.m2x / (cov1.n - 1)
 }
 
-func (v1 *covar) StddevX() float64 {
-	return math.Sqrt(v1.VarianceX())
+func (cov1 *covar) StddevX() float64 {
+	return math.Sqrt(cov1.VarianceX())
 }
 
 func (cov1 *covar) VarianceY() float64 {
 	return cov1.m2y / (cov1.n - 1)
 }
 
-func (v1 *covar) StddevY() float64 {
-	return math.Sqrt(v1.VarianceY())
+func (cov1 *covar) StddevY() float64 {
+	return math.Sqrt(cov1.VarianceY())
 }
 
 func (cov1 *covar) AddPoint(x, y float64) {
-	cov1.n += 1
+	cov1.n++
 
 	dx := x - cov1.meanX
 	cov1.meanX += dx / cov1.n
@ -344,7 +326,7 @@ type meanVar struct {
 
 func (v1 *meanVar) AddPoint(value float64) {
 	// based on https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
-	v1.n += 1
+	v1.n++
 	delta := value - v1.mean
 	v1.mean += delta / v1.n
 	delta2 := value - v1.mean
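The AddPoint method follows Welford's online algorithm, so mean and variance are updated in a single pass without storing the samples. A small self-contained sketch of the same update rule, independent of the meanVar type touched above:

package main

import (
	"fmt"
	"math"
)

// onlineStats accumulates mean and variance incrementally (Welford's method).
type onlineStats struct {
	n    float64
	mean float64
	m2   float64 // sum of squared deviations from the running mean
}

func (s *onlineStats) AddPoint(x float64) {
	s.n++
	delta := x - s.mean
	s.mean += delta / s.n
	delta2 := x - s.mean
	s.m2 += delta * delta2
}

func (s *onlineStats) Variance() float64 { return s.m2 / (s.n - 1) }
func (s *onlineStats) Stddev() float64   { return math.Sqrt(s.Variance()) }

func main() {
	var s onlineStats
	for _, x := range []float64{2, 4, 4, 4, 5, 5, 7, 9} {
		s.AddPoint(x)
	}
	fmt.Printf("mean=%.2f stddev=%.2f\n", s.mean, s.Stddev()) // mean=5.00, sample stddev ~2.14
}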
@ -481,7 +463,7 @@ var importAnalyzeCmd = &cli.Command{
 		}
 
 		go func() {
-			http.ListenAndServe("localhost:6060", nil)
+			http.ListenAndServe("localhost:6060", nil) //nolint:errcheck
 		}()
 
 		fi, err := os.Open(cctx.Args().First())
@ -26,6 +26,11 @@ func main() {
 			EnvVars: []string{"LOTUS_PATH"},
 			Value:   "~/.lotus", // TODO: Consider XDG_DATA_HOME
 		},
+		&cli.StringFlag{
+			Name:    "api",
+			EnvVars: []string{"FULLNODE_API_INFO"},
+			Value:   "",
+		},
 		&cli.StringFlag{
 			Name:    "db",
 			EnvVars: []string{"LOTUS_DB"},
@ -96,12 +96,6 @@ func (p *Processor) HandleMarketChanges(ctx context.Context, marketTips ActorTip
 		log.Fatalw("Failed to persist market actors", "error", err)
 	}
 
-	// we persist the dealID <--> minerID,sectorID here since the dealID needs to be stored above first
-	if err := p.storePreCommitDealInfo(p.sectorDealEvents); err != nil {
-		close(p.sectorDealEvents)
-		return err
-	}
-
 	if err := p.updateMarket(ctx, marketChanges); err != nil {
 		log.Fatalw("Failed to update market actors", "error", err)
 	}
@ -272,48 +266,6 @@ func (p *Processor) storeMarketActorDealProposals(ctx context.Context, marketTip
 
 	}
 
-func (p *Processor) storePreCommitDealInfo(dealEvents <-chan *SectorDealEvent) error {
-	tx, err := p.db.Begin()
-	if err != nil {
-		return err
-	}
-
-	if _, err := tx.Exec(`create temp table mds (like minerid_dealid_sectorid excluding constraints) on commit drop;`); err != nil {
-		return xerrors.Errorf("Failed to create temp table for minerid_dealid_sectorid: %w", err)
-	}
-
-	stmt, err := tx.Prepare(`copy mds (deal_id, miner_id, sector_id) from STDIN`)
-	if err != nil {
-		return xerrors.Errorf("Failed to prepare minerid_dealid_sectorid statement: %w", err)
-	}
-
-	for sde := range dealEvents {
-		for _, did := range sde.DealIDs {
-			if _, err := stmt.Exec(
-				uint64(did),
-				sde.MinerID.String(),
-				sde.SectorID,
-			); err != nil {
-				return err
-			}
-		}
-	}
-
-	if err := stmt.Close(); err != nil {
-		return xerrors.Errorf("Failed to close miner sector deals statement: %w", err)
-	}
-
-	if _, err := tx.Exec(`insert into minerid_dealid_sectorid select * from mds on conflict do nothing`); err != nil {
-		return xerrors.Errorf("Failed to insert into miner deal sector table: %w", err)
-	}
-
-	if err := tx.Commit(); err != nil {
-		return xerrors.Errorf("Failed to commit miner deal sector table: %w", err)
-	}
-	return nil
-
-}
-
 func (p *Processor) updateMarketActorDealProposals(ctx context.Context, marketTip []marketActorInfo) error {
 	start := time.Now()
 	defer func() {
@ -3,7 +3,6 @@ package processor
 import (
 	"context"
 	"sync"
-	"time"
 
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/xerrors"
@ -120,10 +119,6 @@ func (p *Processor) persistMessagesAndReceipts(ctx context.Context, blocks map[c
 }
 
 func (p *Processor) storeReceipts(recs map[mrec]*types.MessageReceipt) error {
-	start := time.Now()
-	defer func() {
-		log.Debugw("Persisted Receipts", "duration", time.Since(start).String())
-	}()
 	tx, err := p.db.Begin()
 	if err != nil {
 		return err
@ -164,10 +159,6 @@ create temp table recs (like receipts excluding constraints) on commit drop;
 }
 
 func (p *Processor) storeMsgInclusions(incls map[cid.Cid][]cid.Cid) error {
-	start := time.Now()
-	defer func() {
-		log.Debugw("Persisted Message Inclusions", "duration", time.Since(start).String())
-	}()
 	tx, err := p.db.Begin()
 	if err != nil {
 		return err
@ -206,10 +197,6 @@ create temp table mi (like block_messages excluding constraints) on commit drop;
 }
 
 func (p *Processor) storeMessages(msgs map[cid.Cid]*types.Message) error {
-	start := time.Now()
-	defer func() {
-		log.Debugw("Persisted Messages", "duration", time.Since(start).String())
-	}()
 	tx, err := p.db.Begin()
 	if err != nil {
 		return err
@ -271,7 +271,11 @@ func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo)
 	preCommitEvents := make(chan *MinerSectorsEvent, 8)
 	sectorEvents := make(chan *MinerSectorsEvent, 8)
 	partitionEvents := make(chan *MinerSectorsEvent, 8)
-	p.sectorDealEvents = make(chan *SectorDealEvent, 8)
+	dealEvents := make(chan *SectorDealEvent, 8)
 
+	grp.Go(func() error {
+		return p.storePreCommitDealInfo(dealEvents)
+	})
+
 	grp.Go(func() error {
 		return p.storeMinerSectorEvents(ctx, sectorEvents, preCommitEvents, partitionEvents)
@ -280,9 +284,9 @@ func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo)
 	grp.Go(func() error {
 		defer func() {
 			close(preCommitEvents)
-			close(p.sectorDealEvents)
+			close(dealEvents)
 		}()
-		return p.storeMinerPreCommitInfo(ctx, miners, preCommitEvents, p.sectorDealEvents)
+		return p.storeMinerPreCommitInfo(ctx, miners, preCommitEvents, dealEvents)
 	})
 
 	grp.Go(func() error {
@ -314,7 +318,10 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
 		return xerrors.Errorf("Failed to prepare miner precommit info statement: %w", err)
 	}
 
+	grp, _ := errgroup.WithContext(ctx)
 	for _, m := range miners {
+		m := m
+		grp.Go(func() error {
 			minerSectors, err := adt.AsArray(p.ctxStore, m.state.Sectors)
 			if err != nil {
 				return err
@ -323,13 +330,12 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
 			changes, err := p.getMinerPreCommitChanges(ctx, m)
 			if err != nil {
 				if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
-					continue
-				} else {
+					return nil
+				}
 				return err
 			}
-		}
 			if changes == nil {
-				continue
+				return nil
 			}
 
 			preCommitAdded := make([]uint64, len(changes.Added))
@ -408,6 +414,11 @@ func (p *Processor) storeMinerPreCommitInfo(ctx context.Context, miners []minerA
 					Event:     PreCommitExpired,
 				}
 			}
+			return nil
+		})
+	}
+	if err := grp.Wait(); err != nil {
+		return err
 	}
 
 	if err := stmt.Close(); err != nil {
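The change above converts a sequential per-miner loop into a fan-out using golang.org/x/sync/errgroup; note the m := m copy, which pins the loop variable before the closure captures it. A stripped-down sketch of the same pattern, with a placeholder work function:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func processOne(ctx context.Context, id int) error {
	// placeholder for per-item work such as reading miner state
	fmt.Println("processing", id)
	return nil
}

func processAll(ctx context.Context, ids []int) error {
	grp, ctx := errgroup.WithContext(ctx)
	for _, id := range ids {
		id := id // capture the loop variable for the goroutine below
		grp.Go(func() error {
			return processOne(ctx, id)
		})
	}
	// Wait returns the first non-nil error from any goroutine.
	return grp.Wait()
}

func main() {
	_ = processAll(context.Background(), []int{1, 2, 3})
}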
@ -439,17 +450,19 @@ func (p *Processor) storeMinerSectorInfo(ctx context.Context, miners []minerActo
 		return xerrors.Errorf("Failed to prepare miner sector info statement: %w", err)
 	}
 
+	grp, _ := errgroup.WithContext(ctx)
 	for _, m := range miners {
+		m := m
+		grp.Go(func() error {
 			changes, err := p.getMinerSectorChanges(ctx, m)
 			if err != nil {
 				if strings.Contains(err.Error(), types.ErrActorNotFound.Error()) {
-					continue
-				} else {
+					return nil
+				}
 				return err
 			}
-		}
 			if changes == nil {
-				continue
+				return nil
 			}
 			var sectorsAdded []uint64
 			var ccAdded []uint64
@ -469,7 +482,7 @@ func (p *Processor) storeMinerSectorInfo(ctx context.Context, miners []minerActo
 				added.ExpectedDayReward.String(),
 				added.ExpectedStoragePledge.String(),
 			); err != nil {
-				return err
+				log.Errorw("writing miner sector changes statement", "error", err.Error())
 			}
 			if len(added.DealIDs) == 0 {
 				ccAdded = append(ccAdded, uint64(added.SectorNumber))
@ -500,6 +513,12 @@ func (p *Processor) storeMinerSectorInfo(ctx context.Context, miners []minerActo
 				SectorIDs: extended,
 				Event:     SectorExtended,
 			}
+			return nil
+		})
+	}
+
+	if err := grp.Wait(); err != nil {
+		return err
 	}
 
 	if err := stmt.Close(); err != nil {
@ -911,6 +930,48 @@ func (p *Processor) storeMinersActorInfoState(ctx context.Context, miners []mine
 	return tx.Commit()
 }
 
+func (p *Processor) storePreCommitDealInfo(dealEvents <-chan *SectorDealEvent) error {
+	tx, err := p.db.Begin()
+	if err != nil {
+		return err
+	}
+
+	if _, err := tx.Exec(`create temp table mds (like minerid_dealid_sectorid excluding constraints) on commit drop;`); err != nil {
+		return xerrors.Errorf("Failed to create temp table for minerid_dealid_sectorid: %w", err)
+	}
+
+	stmt, err := tx.Prepare(`copy mds (deal_id, miner_id, sector_id) from STDIN`)
+	if err != nil {
+		return xerrors.Errorf("Failed to prepare minerid_dealid_sectorid statement: %w", err)
+	}
+
+	for sde := range dealEvents {
+		for _, did := range sde.DealIDs {
+			if _, err := stmt.Exec(
+				uint64(did),
+				sde.MinerID.String(),
+				sde.SectorID,
+			); err != nil {
+				return err
+			}
+		}
+	}
+
+	if err := stmt.Close(); err != nil {
+		return xerrors.Errorf("Failed to close miner sector deals statement: %w", err)
+	}
+
+	if _, err := tx.Exec(`insert into minerid_dealid_sectorid select * from mds on conflict do nothing`); err != nil {
+		return xerrors.Errorf("Failed to insert into miner deal sector table: %w", err)
+	}
+
+	if err := tx.Commit(); err != nil {
+		return xerrors.Errorf("Failed to commit miner deal sector table: %w", err)
+	}
+	return nil
+
+}
+
 func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
 	start := time.Now()
 	defer func() {
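storePreCommitDealInfo uses a common Postgres bulk-load pattern: stage rows into a temp table with COPY, then merge them with insert ... on conflict do nothing. A condensed sketch of that pattern follows; the table and column names are placeholders, error handling is trimmed, and it assumes a Postgres driver that accepts a raw COPY ... FROM STDIN statement through Prepare, as the code above does.

import "database/sql"

// bulkLoad stages rows via COPY into a temp table and merges them into the
// target table, ignoring rows that would violate a uniqueness constraint.
func bulkLoad(db *sql.DB, rows [][]interface{}) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`create temp table staging (like target excluding constraints) on commit drop;`); err != nil {
		return err
	}

	stmt, err := tx.Prepare(`copy staging (col_a, col_b, col_c) from STDIN`)
	if err != nil {
		return err
	}
	for _, r := range rows {
		if _, err := stmt.Exec(r...); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into target select * from staging on conflict do nothing`); err != nil {
		return err
	}
	return tx.Commit()
}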
@ -47,8 +47,6 @@ func (p *Processor) subMpool(ctx context.Context) {
 			msgs[v.Message.Message.Cid()] = &v.Message.Message
 		}
-
-		log.Debugf("Processing %d mpool updates", len(msgs))
 
 		err := p.storeMessages(msgs)
 		if err != nil {
 			log.Error(err)
@ -7,6 +7,7 @@ import (
 
 	"golang.org/x/xerrors"
 
+	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin"
 	"github.com/filecoin-project/specs-actors/actors/builtin/power"
 	"github.com/filecoin-project/specs-actors/actors/util/smoothing"
@ -15,7 +16,19 @@ import (
 type powerActorInfo struct {
 	common actorInfo
 
-	epochSmoothingEstimate *smoothing.FilterEstimate
+	totalRawBytes                      big.Int
+	totalRawBytesCommitted             big.Int
+	totalQualityAdjustedBytes          big.Int
+	totalQualityAdjustedBytesCommitted big.Int
+	totalPledgeCollateral              big.Int
+
+	newRawBytes             big.Int
+	newQualityAdjustedBytes big.Int
+	newPledgeCollateral     big.Int
+	newQAPowerSmoothed      *smoothing.FilterEstimate
+
+	minerCount                  int64
+	minerCountAboveMinimumPower int64
 }
 
 func (p *Processor) setupPower() error {
@ -25,13 +38,27 @@ func (p *Processor) setupPower() error {
 	}
 
 	if _, err := tx.Exec(`
-create table if not exists power_smoothing_estimates
+create table if not exists chain_power
 (
 	state_root text not null
 		constraint power_smoothing_estimates_pk
 			primary key,
-	position_estimate text not null,
-	velocity_estimate text not null
+
+	new_raw_bytes_power text not null,
+	new_qa_bytes_power text not null,
+	new_pledge_collateral text not null,
+
+	total_raw_bytes_power text not null,
+	total_raw_bytes_committed text not null,
+	total_qa_bytes_power text not null,
+	total_qa_bytes_committed text not null,
+	total_pledge_collateral text not null,
+
+	qa_smoothed_position_estimate text not null,
+	qa_smoothed_velocity_estimate text not null,
+
+	miner_count int not null,
+	minimum_consensus_miner_count int not null
 );
 `); err != nil {
 		return err
@ -60,8 +87,8 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
 	}()
 
 	var out []powerActorInfo
-	for tipset, powers := range powerTips {
-		for _, act := range powers {
+	for tipset, powerStates := range powerTips {
+		for _, act := range powerStates {
 			var pw powerActorInfo
 			pw.common = act
 
@ -80,7 +107,19 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
 				return nil, xerrors.Errorf("unmarshal state (@ %s): %w", pw.common.stateroot.String(), err)
 			}
 
-			pw.epochSmoothingEstimate = powerActorState.ThisEpochQAPowerSmoothed
+			pw.totalRawBytes = powerActorState.TotalRawBytePower
+			pw.totalRawBytesCommitted = powerActorState.TotalBytesCommitted
+			pw.totalQualityAdjustedBytes = powerActorState.TotalQualityAdjPower
+			pw.totalQualityAdjustedBytesCommitted = powerActorState.TotalQABytesCommitted
+			pw.totalPledgeCollateral = powerActorState.TotalPledgeCollateral
+
+			pw.newRawBytes = powerActorState.ThisEpochRawBytePower
+			pw.newQualityAdjustedBytes = powerActorState.ThisEpochQualityAdjPower
+			pw.newPledgeCollateral = powerActorState.ThisEpochPledgeCollateral
+			pw.newQAPowerSmoothed = powerActorState.ThisEpochQAPowerSmoothed
+
+			pw.minerCount = powerActorState.MinerCount
+			pw.minerCountAboveMinimumPower = powerActorState.MinerAboveMinPowerCount
 			out = append(out, pw)
 		}
 	}
@ -88,46 +127,59 @@ func (p *Processor) processPowerActors(ctx context.Context, powerTips ActorTips)
 	return out, nil
 }
 
-func (p *Processor) persistPowerActors(ctx context.Context, powers []powerActorInfo) error {
+func (p *Processor) persistPowerActors(ctx context.Context, powerStates []powerActorInfo) error {
 	// NB: use errgroup when there is more than a single store operation
-	return p.storePowerSmoothingEstimates(powers)
+	return p.storePowerSmoothingEstimates(powerStates)
 }
 
-func (p *Processor) storePowerSmoothingEstimates(powers []powerActorInfo) error {
+func (p *Processor) storePowerSmoothingEstimates(powerStates []powerActorInfo) error {
 	tx, err := p.db.Begin()
 	if err != nil {
-		return xerrors.Errorf("begin power_smoothing_estimates tx: %w", err)
+		return xerrors.Errorf("begin chain_power tx: %w", err)
 	}
 
-	if _, err := tx.Exec(`create temp table rse (like power_smoothing_estimates) on commit drop`); err != nil {
-		return xerrors.Errorf("prep power_smoothing_estimates: %w", err)
+	if _, err := tx.Exec(`create temp table cp (like chain_power) on commit drop`); err != nil {
+		return xerrors.Errorf("prep chain_power: %w", err)
 	}
 
-	stmt, err := tx.Prepare(`copy rse (state_root, position_estimate, velocity_estimate) from stdin;`)
+	stmt, err := tx.Prepare(`copy cp (state_root, new_raw_bytes_power, new_qa_bytes_power, new_pledge_collateral, total_raw_bytes_power, total_raw_bytes_committed, total_qa_bytes_power, total_qa_bytes_committed, total_pledge_collateral, qa_smoothed_position_estimate, qa_smoothed_velocity_estimate, miner_count, minimum_consensus_miner_count) from stdin;`)
 	if err != nil {
-		return xerrors.Errorf("prepare tmp power_smoothing_estimates: %w", err)
+		return xerrors.Errorf("prepare tmp chain_power: %w", err)
 	}
 
-	for _, powerState := range powers {
+	for _, ps := range powerStates {
 		if _, err := stmt.Exec(
-			powerState.common.stateroot.String(),
-			powerState.epochSmoothingEstimate.PositionEstimate.String(),
-			powerState.epochSmoothingEstimate.VelocityEstimate.String(),
+			ps.common.stateroot.String(),
+			ps.newRawBytes.String(),
+			ps.newQualityAdjustedBytes.String(),
+			ps.newPledgeCollateral.String(),
+
+			ps.totalRawBytes.String(),
+			ps.totalRawBytesCommitted.String(),
+			ps.totalQualityAdjustedBytes.String(),
+			ps.totalQualityAdjustedBytesCommitted.String(),
+			ps.totalPledgeCollateral.String(),
+
+			ps.newQAPowerSmoothed.PositionEstimate.String(),
+			ps.newQAPowerSmoothed.VelocityEstimate.String(),
+
+			ps.minerCount,
+			ps.minerCountAboveMinimumPower,
 		); err != nil {
 			return xerrors.Errorf("failed to store smoothing estimate: %w", err)
 		}
 	}
 
 	if err := stmt.Close(); err != nil {
-		return xerrors.Errorf("close prepared power_smoothing_estimates: %w", err)
+		return xerrors.Errorf("close prepared chain_power: %w", err)
 	}
 
-	if _, err := tx.Exec(`insert into power_smoothing_estimates select * from rse on conflict do nothing`); err != nil {
-		return xerrors.Errorf("insert power_smoothing_estimates from tmp: %w", err)
+	if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
+		return xerrors.Errorf("insert chain_power from tmp: %w", err)
	}
 
 	if err := tx.Commit(); err != nil {
-		return xerrors.Errorf("commit power_smoothing_estimates tx: %w", err)
+		return xerrors.Errorf("commit chain_power tx: %w", err)
 	}
 
 	return nil
@ -8,7 +8,6 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/sync/errgroup"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
@ -36,9 +35,6 @@ type Processor struct {
 
 	// number of blocks processed at a time
 	batch int
-
-	// process communication channels
-	sectorDealEvents chan *SectorDealEvent
 }
 
 type ActorTips map[types.TipSetKey][]actorInfo
@ -144,60 +140,63 @@ func (p *Processor) Start(ctx context.Context) {
 				"AccountChanges", len(actorChanges[builtin.AccountActorCodeID]),
 				"nullRounds", len(nullRounds))
 
-			grp, ctx := errgroup.WithContext(ctx)
+			grp := sync.WaitGroup{}
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandleMarketChanges(ctx, actorChanges[builtin.StorageMarketActorCodeID]); err != nil {
-					return xerrors.Errorf("Failed to handle market changes: %w", err)
+					log.Errorf("Failed to handle market changes: %w", err)
+					return
 				}
-				log.Info("Processed Market Changes")
-				return nil
-			})
+			}()
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandleMinerChanges(ctx, actorChanges[builtin.StorageMinerActorCodeID]); err != nil {
-					return xerrors.Errorf("Failed to handle miner changes: %w", err)
+					log.Errorf("Failed to handle miner changes: %w", err)
+					return
 				}
-				log.Info("Processed Miner Changes")
-				return nil
-			})
+			}()
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandleRewardChanges(ctx, actorChanges[builtin.RewardActorCodeID], nullRounds); err != nil {
-					return xerrors.Errorf("Failed to handle reward changes: %w", err)
+					log.Errorf("Failed to handle reward changes: %w", err)
+					return
 				}
-				log.Info("Processed Reward Changes")
-				return nil
-			})
+			}()
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandlePowerChanges(ctx, actorChanges[builtin.StoragePowerActorCodeID]); err != nil {
-					return xerrors.Errorf("Failed to handle power actor changes: %w", err)
+					log.Errorf("Failed to handle power actor changes: %w", err)
+					return
 				}
-				log.Info("Processes Power Changes")
-				return nil
-			})
+			}()
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
-					return xerrors.Errorf("Failed to handle message changes: %w", err)
+					log.Errorf("Failed to handle message changes: %w", err)
+					return
 				}
-				log.Info("Processed Message Changes")
-				return nil
-			})
+			}()
 
-			grp.Go(func() error {
+			grp.Add(1)
+			go func() {
+				defer grp.Done()
 				if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
-					return xerrors.Errorf("Failed to handle common actor changes: %w", err)
+					log.Errorf("Failed to handle common actor changes: %w", err)
+					return
 				}
-				log.Info("Processed CommonActor Changes")
-				return nil
-			})
+			}()
 
-			if err := grp.Wait(); err != nil {
-				log.Errorw("Failed to handle actor changes...retrying", "error", err)
-				continue
-			}
+			grp.Wait()
 
 			if err := p.markBlocksProcessed(ctx, toProcess); err != nil {
 				log.Fatalw("Failed to mark blocks as processed", "error", err)
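The Start loop now fans the handlers out with a plain sync.WaitGroup and lets each goroutine log its own failure, instead of propagating the first error through errgroup and retrying the whole batch. A condensed sketch of the pattern, with placeholder handler functions:

package main

import (
	"log"
	"sync"
)

func runHandlers(handlers []func() error) {
	var wg sync.WaitGroup
	for _, h := range handlers {
		h := h // pin loop variable before capture
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := h(); err != nil {
				// Each handler reports independently; one failure does not
				// cancel the others, unlike errgroup.WithContext.
				log.Printf("handler failed: %v", err)
			}
		}()
	}
	wg.Wait()
}

func main() {
	runHandlers([]func() error{
		func() error { return nil },
	})
}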
@ -206,7 +205,7 @@ func (p *Processor) Start(ctx context.Context) {
 			if err := p.refreshViews(); err != nil {
 				log.Errorw("Failed to refresh views", "error", err)
 			}
-			log.Infow("Processed Batch", "duration", time.Since(loopStart).String())
+			log.Infow("Processed Batch Complete", "duration", time.Since(loopStart).String())
 		}
 	}
 }()
@ -370,7 +369,9 @@ where rnk <= $1
 			maxBlock = bh.Height
 		}
 	}
+	if minBlock <= maxBlock {
 		log.Infow("Gathered Blocks to Process", "start", minBlock, "end", maxBlock)
+	}
 	return out, rows.Close()
 }
 
@ -5,7 +5,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||||
@ -13,20 +12,23 @@ import (
|
|||||||
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
||||||
"github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
"github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
type rewardActorInfo struct {
|
type rewardActorInfo struct {
|
||||||
common actorInfo
|
common actorInfo
|
||||||
|
|
||||||
// expected power in bytes during this epoch
|
cumSumBaselinePower big.Int
|
||||||
baselinePower big.Int
|
cumSumRealizedPower big.Int
|
||||||
|
|
||||||
// base reward in attofil for each block found during this epoch
|
effectiveNetworkTime int64
|
||||||
baseBlockReward big.Int
|
effectiveBaselinePower big.Int
|
||||||
|
|
||||||
epochSmoothingEstimate *smoothing.FilterEstimate
|
newBaselinePower big.Int
|
||||||
|
newBaseReward big.Int
|
||||||
|
newSmoothingEstimate *smoothing.FilterEstimate
|
||||||
|
|
||||||
|
totalMinedReward big.Int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Processor) setupRewards() error {
|
func (p *Processor) setupRewards() error {
|
||||||
@ -36,34 +38,23 @@ func (p *Processor) setupRewards() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if _, err := tx.Exec(`
|
if _, err := tx.Exec(`
|
||||||
/*
|
|
||||||
* captures base block reward per miner per state root and does not
|
|
||||||
* include penalties or gas reward
|
|
||||||
*/
|
|
||||||
create table if not exists base_block_rewards
|
|
||||||
(
|
|
||||||
state_root text not null
|
|
||||||
constraint block_rewards_pk
|
|
||||||
primary key,
|
|
||||||
base_block_reward numeric not null
|
|
||||||
);
|
|
||||||
|
|
||||||
/* captures chain-specific power state for any given stateroot */
|
/* captures chain-specific power state for any given stateroot */
|
||||||
create table if not exists chain_power
|
create table if not exists chain_reward
|
||||||
(
|
(
|
||||||
state_root text not null
|
state_root text not null
|
||||||
constraint chain_power_pk
|
constraint chain_reward_pk
|
||||||
primary key,
|
primary key,
|
||||||
baseline_power text not null
|
cum_sum_baseline text not null,
|
||||||
);
|
cum_sum_realized text not null,
|
||||||
|
effective_network_time int not null,
|
||||||
|
effective_baseline_power text not null,
|
||||||
|
|
||||||
create table if not exists reward_smoothing_estimates
|
new_baseline_power text not null,
|
||||||
(
|
new_reward numeric not null,
|
||||||
state_root text not null
|
new_reward_smoothed_position_estimate text not null,
|
||||||
constraint reward_smoothing_estimates_pk
|
new_reward_smoothed_velocity_estimate text not null,
|
||||||
primary key,
|
|
||||||
position_estimate text not null,
|
total_mined_reward text not null
|
||||||
velocity_estimate text not null
|
|
||||||
);
|
);
|
||||||
`); err != nil {
|
`); err != nil {
|
||||||
return err
|
return err
|
||||||
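The schema change above folds the old base_block_rewards and reward_smoothing_estimates tables into a single chain_reward row per state root. A hedged read-side sketch follows, assuming the column names from the hunk above and lib/pq; the helper name is hypothetical, and the per-block figure is derived by dividing new_reward by blocks-per-epoch, which is what the dropped base_block_rewards table used to store directly:

package processor

import (
	"database/sql"

	"github.com/filecoin-project/specs-actors/actors/abi/big"

	"github.com/filecoin-project/lotus/build"
)

// baseBlockRewardFor is a hypothetical read helper for the chain_reward
// table defined above. Big integers are stored as text/numeric columns,
// so they are scanned into strings and parsed with specs-actors' big package.
func baseBlockRewardFor(db *sql.DB, stateRoot string) (big.Int, error) {
	var newReward string
	if err := db.QueryRow(
		`select new_reward from chain_reward where state_root = $1`,
		stateRoot,
	).Scan(&newReward); err != nil {
		return big.Zero(), err
	}

	epochReward, err := big.FromString(newReward)
	if err != nil {
		return big.Zero(), err
	}

	// Per-block reward, as the removed base_block_rewards table stored it.
	return big.Div(epochReward, big.NewIntUnsigned(build.BlocksPerEpoch)), nil
}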
@ -113,9 +104,14 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
|
|||||||
return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
|
return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rw.baseBlockReward = rewardActorState.ThisEpochReward
|
rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
|
||||||
rw.baselinePower = rewardActorState.ThisEpochBaselinePower
|
rw.cumSumRealizedPower = rewardActorState.CumsumRealized
|
||||||
rw.epochSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
|
||||||
|
rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
|
||||||
|
rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
|
||||||
|
rw.newBaseReward = rewardActorState.ThisEpochReward
|
||||||
|
rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
||||||
|
rw.totalMinedReward = rewardActorState.TotalMined
|
||||||
out = append(out, rw)
|
out = append(out, rw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -145,8 +141,14 @@ func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTip
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
rw.baseBlockReward = rewardActorState.ThisEpochReward
|
rw.cumSumBaselinePower = rewardActorState.CumsumBaseline
|
||||||
rw.baselinePower = rewardActorState.ThisEpochBaselinePower
|
rw.cumSumRealizedPower = rewardActorState.CumsumRealized
|
||||||
|
rw.effectiveNetworkTime = int64(rewardActorState.EffectiveNetworkTime)
|
||||||
|
rw.effectiveBaselinePower = rewardActorState.EffectiveBaselinePower
|
||||||
|
rw.newBaselinePower = rewardActorState.ThisEpochBaselinePower
|
||||||
|
rw.newBaseReward = rewardActorState.ThisEpochReward
|
||||||
|
rw.newSmoothingEstimate = rewardActorState.ThisEpochRewardSmoothed
|
||||||
|
rw.totalMinedReward = rewardActorState.TotalMined
|
||||||
out = append(out, rw)
|
out = append(out, rw)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -159,149 +161,47 @@ func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardAct
|
|||||||
log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
|
log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
|
||||||
}()
|
}()
|
||||||
|
|
||||||
grp, ctx := errgroup.WithContext(ctx)
|
|
||||||
|
|
||||||
grp.Go(func() error {
|
|
||||||
if err := p.storeChainPower(rewards); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
grp.Go(func() error {
|
|
||||||
if err := p.storeBaseBlockReward(rewards); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
grp.Go(func() error {
|
|
||||||
if err := p.storeRewardSmoothingEstimates(rewards); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
return grp.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) storeChainPower(rewards []rewardActorInfo) error {
|
|
||||||
tx, err := p.db.Begin()
|
tx, err := p.db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("begin chain_power tx: %w", err)
|
return xerrors.Errorf("begin chain_reward tx: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := tx.Exec(`create temp table cp (like chain_power excluding constraints) on commit drop`); err != nil {
|
if _, err := tx.Exec(`create temp table cr (like chain_reward excluding constraints) on commit drop`); err != nil {
|
||||||
return xerrors.Errorf("prep chain_power temp: %w", err)
|
return xerrors.Errorf("prep chain_reward temp: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
stmt, err := tx.Prepare(`copy cp (state_root, baseline_power) from STDIN`)
|
stmt, err := tx.Prepare(`copy cr ( state_root, cum_sum_baseline, cum_sum_realized, effective_network_time, effective_baseline_power, new_baseline_power, new_reward, new_reward_smoothed_position_estimate, new_reward_smoothed_velocity_estimate, total_mined_reward) from STDIN`)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("prepare tmp chain_power: %w", err)
|
return xerrors.Errorf("prepare tmp chain_reward: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, rewardState := range rewards {
|
for _, rewardState := range rewards {
|
||||||
if _, err := stmt.Exec(
|
if _, err := stmt.Exec(
|
||||||
rewardState.common.stateroot.String(),
|
rewardState.common.stateroot.String(),
|
||||||
rewardState.baselinePower.String(),
|
rewardState.cumSumBaselinePower.String(),
|
||||||
|
rewardState.cumSumRealizedPower.String(),
|
||||||
|
rewardState.effectiveNetworkTime,
|
||||||
|
rewardState.effectiveBaselinePower.String(),
|
||||||
|
rewardState.newBaselinePower.String(),
|
||||||
|
rewardState.newBaseReward.String(),
|
||||||
|
rewardState.newSmoothingEstimate.PositionEstimate.String(),
|
||||||
|
rewardState.newSmoothingEstimate.VelocityEstimate.String(),
|
||||||
|
rewardState.totalMinedReward.String(),
|
||||||
); err != nil {
|
); err != nil {
|
||||||
log.Errorw("failed to store chain power", "state_root", rewardState.common.stateroot, "error", err)
|
log.Errorw("failed to store chain power", "state_root", rewardState.common.stateroot, "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := stmt.Close(); err != nil {
|
if err := stmt.Close(); err != nil {
|
||||||
return xerrors.Errorf("close prepared chain_power: %w", err)
|
return xerrors.Errorf("close prepared chain_reward: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
|
if _, err := tx.Exec(`insert into chain_reward select * from cr on conflict do nothing`); err != nil {
|
||||||
return xerrors.Errorf("insert chain_power from tmp: %w", err)
|
return xerrors.Errorf("insert chain_reward from tmp: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
if err := tx.Commit(); err != nil {
|
||||||
return xerrors.Errorf("commit chain_power tx: %w", err)
|
return xerrors.Errorf("commit chain_reward tx: %w", err)
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) storeBaseBlockReward(rewards []rewardActorInfo) error {
|
|
||||||
tx, err := p.db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("begin base_block_reward tx: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tx.Exec(`create temp table bbr (like base_block_rewards excluding constraints) on commit drop`); err != nil {
|
|
||||||
return xerrors.Errorf("prep base_block_reward temp: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
stmt, err := tx.Prepare(`copy bbr (state_root, base_block_reward) from STDIN`)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("prepare tmp base_block_reward: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, rewardState := range rewards {
|
|
||||||
baseBlockReward := big.Div(rewardState.baseBlockReward, big.NewIntUnsigned(build.BlocksPerEpoch))
|
|
||||||
if _, err := stmt.Exec(
|
|
||||||
rewardState.common.stateroot.String(),
|
|
||||||
baseBlockReward.String(),
|
|
||||||
); err != nil {
|
|
||||||
log.Errorw("failed to store base block reward", "state_root", rewardState.common.stateroot, "error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := stmt.Close(); err != nil {
|
|
||||||
return xerrors.Errorf("close prepared base_block_reward: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tx.Exec(`insert into base_block_rewards select * from bbr on conflict do nothing`); err != nil {
|
|
||||||
return xerrors.Errorf("insert base_block_reward from tmp: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return xerrors.Errorf("commit base_block_reward tx: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Processor) storeRewardSmoothingEstimates(rewards []rewardActorInfo) error {
|
|
||||||
tx, err := p.db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("begin reward_smoothing_estimates tx: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tx.Exec(`create temp table rse (like reward_smoothing_estimates) on commit drop`); err != nil {
|
|
||||||
return xerrors.Errorf("prep reward_smoothing_estimates: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
stmt, err := tx.Prepare(`copy rse (state_root, position_estimate, velocity_estimate) from stdin;`)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("prepare tmp reward_smoothing_estimates: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, rewardState := range rewards {
|
|
||||||
if rewardState.epochSmoothingEstimate == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, err := stmt.Exec(
|
|
||||||
rewardState.common.stateroot.String(),
|
|
||||||
rewardState.epochSmoothingEstimate.PositionEstimate.String(),
|
|
||||||
rewardState.epochSmoothingEstimate.VelocityEstimate.String(),
|
|
||||||
); err != nil {
|
|
||||||
return xerrors.Errorf("failed to store smoothing estimate: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := stmt.Close(); err != nil {
|
|
||||||
return xerrors.Errorf("close prepared reward_smoothing_estimates: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tx.Exec(`insert into reward_smoothing_estimates select * from rse on conflict do nothing`); err != nil {
|
|
||||||
return xerrors.Errorf("insert reward_smoothing_estimates from tmp: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return xerrors.Errorf("commit reward_smoothing_estimates tx: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
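The persistence path above keeps the same bulk-load shape while switching tables: rows are staged into a session temp table over the COPY protocol, then moved into the destination with insert ... on conflict do nothing, all inside one transaction. A minimal sketch of that pattern with lib/pq follows, using an illustrative two-column destination table ("example_rewards") rather than the real chain_reward schema:

package processor

import (
	"database/sql"

	_ "github.com/lib/pq"
	"golang.org/x/xerrors"
)

// copyThenUpsert stages rows via COPY and inserts them into a destination
// table, skipping rows that already exist. The table example_rewards
// (state_root text primary key, new_reward numeric) is illustrative only.
func copyThenUpsert(db *sql.DB, rows map[string]string) error {
	tx, err := db.Begin()
	if err != nil {
		return xerrors.Errorf("begin tx: %w", err)
	}

	if _, err := tx.Exec(`create temp table tmp (like example_rewards excluding constraints) on commit drop`); err != nil {
		return xerrors.Errorf("prep temp table: %w", err)
	}

	// lib/pq turns a prepared "copy ... from STDIN" into a streaming copy;
	// each stmt.Exec sends one row.
	stmt, err := tx.Prepare(`copy tmp (state_root, new_reward) from STDIN`)
	if err != nil {
		return xerrors.Errorf("prepare copy: %w", err)
	}
	for stateRoot, reward := range rows {
		if _, err := stmt.Exec(stateRoot, reward); err != nil {
			return xerrors.Errorf("copy row: %w", err)
		}
	}
	if err := stmt.Close(); err != nil {
		return xerrors.Errorf("close copy: %w", err)
	}

	if _, err := tx.Exec(`insert into example_rewards select * from tmp on conflict do nothing`); err != nil {
		return xerrors.Errorf("insert from temp: %w", err)
	}

	return tx.Commit()
}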
@ -2,20 +2,25 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof"
|
_ "net/http/pprof"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
_ "github.com/lib/pq"
|
_ "github.com/lib/pq"
|
||||||
|
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
"github.com/filecoin-project/go-jsonrpc"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
|
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
|
||||||
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/scheduler"
|
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/scheduler"
|
||||||
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/syncer"
|
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/syncer"
|
||||||
|
"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
var runCmd = &cli.Command{
|
var runCmd = &cli.Command{
|
||||||
@ -24,12 +29,12 @@ var runCmd = &cli.Command{
|
|||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
&cli.IntFlag{
|
&cli.IntFlag{
|
||||||
Name: "max-batch",
|
Name: "max-batch",
|
||||||
Value: 1000,
|
Value: 50,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
go func() {
|
go func() {
|
||||||
http.ListenAndServe(":6060", nil)
|
http.ListenAndServe(":6060", nil) //nolint:errcheck
|
||||||
}()
|
}()
|
||||||
ll := cctx.String("log-level")
|
ll := cctx.String("log-level")
|
||||||
if err := logging.SetLogLevel("*", ll); err != nil {
|
if err := logging.SetLogLevel("*", ll); err != nil {
|
||||||
@ -39,10 +44,25 @@ var runCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
var api api.FullNode
|
||||||
|
var closer jsonrpc.ClientCloser
|
||||||
|
var err error
|
||||||
|
if tokenMaddr := cctx.String("api"); tokenMaddr != "" {
|
||||||
|
toks := strings.Split(tokenMaddr, ":")
|
||||||
|
if len(toks) != 2 {
|
||||||
|
return fmt.Errorf("invalid api tokens, expected <token>:<maddr>, got: %s", tokenMaddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
api, closer, err = util.GetFullNodeAPIUsingCredentials(cctx.Context, toks[1], toks[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
api, closer, err = lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
defer closer()
|
defer closer()
|
||||||
ctx := lcli.ReqContext(cctx)
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
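The new --api handling above lets chainwatch talk to a node using explicit credentials instead of a local repo: the flag value is split into an API token and a listen multiaddr, falling back to lcli.GetFullNodeAPI only when the flag is empty. A small sketch of the parsing step, assuming the same <token>:<maddr> format; the function name and example values are illustrative:

package main

import (
	"fmt"
	"strings"
)

// parseAPIFlag splits an --api value of the form <token>:<maddr>, e.g.
// "PLACEHOLDER_TOKEN:/ip4/127.0.0.1/tcp/1234/http" (placeholder values).
// Multiaddrs are slash-delimited, so splitting on the first ':' is safe.
func parseAPIFlag(v string) (token, maddr string, err error) {
	toks := strings.SplitN(v, ":", 2)
	if len(toks) != 2 || toks[0] == "" || toks[1] == "" {
		return "", "", fmt.Errorf("invalid api value, expected <token>:<maddr>, got: %s", v)
	}
	return toks[0], toks[1], nil
}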
@ -70,7 +90,7 @@ var runCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
db.SetMaxOpenConns(1350)
|
db.SetMaxOpenConns(1350)
|
||||||
|
|
||||||
sync := syncer.NewSyncer(db, api)
|
sync := syncer.NewSyncer(db, api, 1400)
|
||||||
sync.Start(ctx)
|
sync.Start(ctx)
|
||||||
|
|
||||||
proc := processor.NewProcessor(ctx, db, api, maxBatch)
|
proc := processor.NewProcessor(ctx, db, api, maxBatch)
|
||||||
|
@ -3,7 +3,6 @@ package scheduler
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
@ -24,9 +23,9 @@ func setupTopMinerByBaseRewardSchema(ctx context.Context, db *sql.DB) error {
|
|||||||
with total_rewards_by_miner as (
|
with total_rewards_by_miner as (
|
||||||
select
|
select
|
||||||
b.miner,
|
b.miner,
|
||||||
sum(bbr.base_block_reward * b.win_count) as total_reward
|
sum(cr.new_reward * b.win_count) as total_reward
|
||||||
from blocks b
|
from blocks b
|
||||||
inner join base_block_rewards bbr on b.parentstateroot = bbr.state_root
|
inner join chain_reward cr on b.parentstateroot = cr.state_root
|
||||||
group by 1
|
group by 1
|
||||||
) select
|
) select
|
||||||
rank() over (order by total_reward desc),
|
rank() over (order by total_reward desc),
|
||||||
@ -43,17 +42,17 @@ func setupTopMinerByBaseRewardSchema(ctx context.Context, db *sql.DB) error {
|
|||||||
b."timestamp"as current_timestamp,
|
b."timestamp"as current_timestamp,
|
||||||
max(b.height) as current_height
|
max(b.height) as current_height
|
||||||
from blocks b
|
from blocks b
|
||||||
join base_block_rewards bbr on b.parentstateroot = bbr.state_root
|
join chain_reward cr on b.parentstateroot = cr.state_root
|
||||||
where bbr.base_block_reward is not null
|
where cr.new_reward is not null
|
||||||
group by 1
|
group by 1
|
||||||
order by 1 desc
|
order by 1 desc
|
||||||
limit 1;
|
limit 1;
|
||||||
`); err != nil {
|
`); err != nil {
|
||||||
return xerrors.Errorf("create top_miner_by_base_reward views", err)
|
return xerrors.Errorf("create top_miners_by_base_reward views: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
if err := tx.Commit(); err != nil {
|
||||||
return xerrors.Errorf("commiting top_miner_by_base_reward views", err)
|
return xerrors.Errorf("committing top_miners_by_base_reward views; %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -65,11 +64,6 @@ func refreshTopMinerByBaseReward(ctx context.Context, db *sql.DB) error {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
t := time.Now()
|
|
||||||
defer func() {
|
|
||||||
log.Debugw("refresh top_miners_by_base_reward", "duration", time.Since(t).String())
|
|
||||||
}()
|
|
||||||
|
|
||||||
_, err := db.Exec("refresh materialized view top_miners_by_base_reward;")
|
_, err := db.Exec("refresh materialized view top_miners_by_base_reward;")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("refresh top_miners_by_base_reward: %w", err)
|
return xerrors.Errorf("refresh top_miners_by_base_reward: %w", err)
|
||||||
|
@ -25,7 +25,7 @@ func PrepareScheduler(db *sql.DB) *Scheduler {
|
|||||||
|
|
||||||
func (s *Scheduler) setupSchema(ctx context.Context) error {
|
func (s *Scheduler) setupSchema(ctx context.Context) error {
|
||||||
if err := setupTopMinerByBaseRewardSchema(ctx, s.db); err != nil {
|
if err := setupTopMinerByBaseRewardSchema(ctx, s.db); err != nil {
|
||||||
return xerrors.Errorf("setup top miners by reward schema", err)
|
return xerrors.Errorf("setup top miners by reward schema: %w", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -35,14 +35,14 @@ func (s *Scheduler) Start(ctx context.Context) {
|
|||||||
log.Debug("Starting Scheduler")
|
log.Debug("Starting Scheduler")
|
||||||
|
|
||||||
if err := s.setupSchema(ctx); err != nil {
|
if err := s.setupSchema(ctx); err != nil {
|
||||||
log.Fatalw("applying scheduling schema", err)
|
log.Fatalw("applying scheduling schema", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// run once on start after schema has initialized
|
// run once on start after schema has initialized
|
||||||
time.Sleep(5 * time.Second)
|
time.Sleep(1 * time.Minute)
|
||||||
if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
|
if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
|
||||||
log.Errorf(err.Error())
|
log.Errorw("failed to refresh top miner", "error", err)
|
||||||
}
|
}
|
||||||
refreshTopMinerCh := time.NewTicker(30 * time.Second)
|
refreshTopMinerCh := time.NewTicker(30 * time.Second)
|
||||||
defer refreshTopMinerCh.Stop()
|
defer refreshTopMinerCh.Stop()
|
||||||
@ -50,7 +50,7 @@ func (s *Scheduler) Start(ctx context.Context) {
|
|||||||
select {
|
select {
|
||||||
case <-refreshTopMinerCh.C:
|
case <-refreshTopMinerCh.C:
|
||||||
if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
|
if err := refreshTopMinerByBaseReward(ctx, s.db); err != nil {
|
||||||
log.Errorf(err.Error())
|
log.Errorw("failed to refresh top miner", "error", err)
|
||||||
}
|
}
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
|
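Taken together, the scheduler changes above reduce to a single periodic job: refresh the materialized view on a ticker and log failures rather than crash. Consolidated into one place, the loop looks roughly like the sketch below (the view name comes from the schema hunk above; everything else is illustrative):

package scheduler

import (
	"context"
	"database/sql"
	"time"

	logging "github.com/ipfs/go-log/v2"
)

var log = logging.Logger("scheduler")

// refreshLoop periodically refreshes the materialized view; errors are
// logged and the loop keeps running until the context is cancelled.
func refreshLoop(ctx context.Context, db *sql.DB) {
	tick := time.NewTicker(30 * time.Second)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			if _, err := db.ExecContext(ctx, `refresh materialized view top_miners_by_base_reward;`); err != nil {
				log.Errorw("failed to refresh top miner", "error", err)
			}
		case <-ctx.Done():
			return
		}
	}
}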
@ -11,16 +11,17 @@ import (
|
|||||||
func (s *Syncer) subBlocks(ctx context.Context) {
|
func (s *Syncer) subBlocks(ctx context.Context) {
|
||||||
sub, err := s.node.SyncIncomingBlocks(ctx)
|
sub, err := s.node.SyncIncomingBlocks(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Errorf("opening incoming block channel: %+v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Infow("Capturing incoming blocks")
|
||||||
for bh := range sub {
|
for bh := range sub {
|
||||||
err := s.storeHeaders(map[cid.Cid]*types.BlockHeader{
|
err := s.storeHeaders(map[cid.Cid]*types.BlockHeader{
|
||||||
bh.Cid(): bh,
|
bh.Cid(): bh,
|
||||||
}, false, time.Now())
|
}, false, time.Now())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("%+v", err)
|
log.Errorf("storing incoming block header: %+v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -23,14 +23,17 @@ var log = logging.Logger("syncer")
|
|||||||
type Syncer struct {
|
type Syncer struct {
|
||||||
db *sql.DB
|
db *sql.DB
|
||||||
|
|
||||||
|
lookbackLimit uint64
|
||||||
|
|
||||||
headerLk sync.Mutex
|
headerLk sync.Mutex
|
||||||
node api.FullNode
|
node api.FullNode
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSyncer(db *sql.DB, node api.FullNode) *Syncer {
|
func NewSyncer(db *sql.DB, node api.FullNode, lookbackLimit uint64) *Syncer {
|
||||||
return &Syncer{
|
return &Syncer{
|
||||||
db: db,
|
db: db,
|
||||||
node: node,
|
node: node,
|
||||||
|
lookbackLimit: lookbackLimit,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -148,25 +151,28 @@ create index if not exists state_heights_parentstateroot_index
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Syncer) Start(ctx context.Context) {
|
func (s *Syncer) Start(ctx context.Context) {
|
||||||
|
if err := logging.SetLogLevel("syncer", "info"); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
log.Debug("Starting Syncer")
|
log.Debug("Starting Syncer")
|
||||||
|
|
||||||
if err := s.setupSchemas(); err != nil {
|
if err := s.setupSchemas(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// doing the initial sync here lets us avoid the HCCurrent case in the switch
|
// capture all reported blocks
|
||||||
head, err := s.node.ChainHead(ctx)
|
go s.subBlocks(ctx)
|
||||||
if err != nil {
|
|
||||||
log.Fatalw("Failed to get chain head form lotus", "error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
unsynced, err := s.unsyncedBlocks(ctx, head, time.Unix(0, 0))
|
// we need to ensure that on a restart we don't reprocess the whole flarping chain
|
||||||
|
var sinceEpoch uint64
|
||||||
|
blkCID, height, err := s.mostRecentlySyncedBlockHeight()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalw("failed to gather unsynced blocks", "error", err)
|
log.Fatalw("failed to find most recently synced block", "error", err)
|
||||||
|
} else {
|
||||||
|
if height > 0 {
|
||||||
|
log.Infow("Found starting point for syncing", "blockCID", blkCID.String(), "height", height)
|
||||||
|
sinceEpoch = uint64(height)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
|
|
||||||
log.Fatalw("failed to store unsynced blocks", "error", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// continue to keep the block headers table up to date.
|
// continue to keep the block headers table up to date.
|
||||||
@ -175,13 +181,18 @@ func (s *Syncer) Start(ctx context.Context) {
|
|||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lastSynced := time.Now()
|
|
||||||
go func() {
|
go func() {
|
||||||
for notif := range notifs {
|
for notif := range notifs {
|
||||||
for _, change := range notif {
|
for _, change := range notif {
|
||||||
switch change.Type {
|
switch change.Type {
|
||||||
|
case store.HCCurrent:
|
||||||
|
// This case is important for capturing the initial state of a node
|
||||||
|
// which might be on a dead network with no new blocks being produced.
|
||||||
|
// It also allows a fresh Chainwatch instance to start walking the
|
||||||
|
// chain without waiting for a new block to come along.
|
||||||
|
fallthrough
|
||||||
case store.HCApply:
|
case store.HCApply:
|
||||||
unsynced, err := s.unsyncedBlocks(ctx, change.Val, lastSynced)
|
unsynced, err := s.unsyncedBlocks(ctx, change.Val, sinceEpoch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorw("failed to gather unsynced blocks", "error", err)
|
log.Errorw("failed to gather unsynced blocks", "error", err)
|
||||||
}
|
}
|
||||||
@ -194,13 +205,13 @@ func (s *Syncer) Start(ctx context.Context) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.storeHeaders(unsynced, true, lastSynced); err != nil {
|
if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
|
||||||
// so this is pretty bad, need some kind of retry..
|
// so this is pretty bad, need some kind of retry..
|
||||||
// for now just log an error and the blocks will be attempted again on next notification
|
// for now just log an error and the blocks will be attempted again on next notification
|
||||||
log.Errorw("failed to store unsynced blocks", "error", err)
|
log.Errorw("failed to store unsynced blocks", "error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lastSynced = time.Now()
|
sinceEpoch = uint64(change.Val.Height())
|
||||||
case store.HCRevert:
|
case store.HCRevert:
|
||||||
log.Debug("revert todo")
|
log.Debug("revert todo")
|
||||||
}
|
}
|
||||||
@ -209,12 +220,8 @@ func (s *Syncer) Start(ctx context.Context) {
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since time.Time) (map[cid.Cid]*types.BlockHeader, error) {
|
func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since uint64) (map[cid.Cid]*types.BlockHeader, error) {
|
||||||
// get a list of blocks we have already synced in the past 3 mins. This ensures we aren't returning the entire
|
hasList, err := s.syncedBlocks(since, s.lookbackLimit)
|
||||||
// table every time.
|
|
||||||
lookback := since.Add(-(time.Minute * 3))
|
|
||||||
log.Debugw("Gathering unsynced blocks", "since", lookback.String())
|
|
||||||
hasList, err := s.syncedBlocks(lookback)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -257,9 +264,8 @@ func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since t
|
|||||||
return toSync, nil
|
return toSync, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error) {
|
func (s *Syncer) syncedBlocks(since, limit uint64) (map[cid.Cid]struct{}, error) {
|
||||||
// timestamp is used to return a configurable amount of rows based on when they were last added.
|
rws, err := s.db.Query(`select bs.cid FROM blocks_synced bs left join blocks b on b.cid = bs.cid where b.height <= $1 and bs.processed_at is not null limit $2`, since, limit)
|
||||||
rws, err := s.db.Query(`select cid FROM blocks_synced where synced_at > $1`, timestamp.Unix())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
|
return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
|
||||||
}
|
}
|
||||||
@ -281,6 +287,33 @@ func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error)
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Syncer) mostRecentlySyncedBlockHeight() (cid.Cid, int64, error) {
|
||||||
|
rw := s.db.QueryRow(`
|
||||||
|
select blocks_synced.cid, b.height
|
||||||
|
from blocks_synced
|
||||||
|
left join blocks b on blocks_synced.cid = b.cid
|
||||||
|
where processed_at is not null
|
||||||
|
order by height desc
|
||||||
|
limit 1
|
||||||
|
`)
|
||||||
|
|
||||||
|
var c string
|
||||||
|
var h int64
|
||||||
|
if err := rw.Scan(&c, &h); err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return cid.Undef, 0, nil
|
||||||
|
}
|
||||||
|
return cid.Undef, -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ci, err := cid.Parse(c)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ci, h, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Syncer) storeCirculatingSupply(ctx context.Context, tipset *types.TipSet) error {
|
func (s *Syncer) storeCirculatingSupply(ctx context.Context, tipset *types.TipSet) error {
|
||||||
supply, err := s.node.StateCirculatingSupply(ctx, tipset.Key())
|
supply, err := s.node.StateCirculatingSupply(ctx, tipset.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -288,7 +321,9 @@ func (s *Syncer) storeCirculatingSupply(ctx context.Context, tipset *types.TipSe
|
|||||||
}
|
}
|
||||||
|
|
||||||
ceInsert := `insert into chain_economics (parent_state_root, circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil) ` +
|
ceInsert := `insert into chain_economics (parent_state_root, circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil) ` +
|
||||||
`values ('%s', '%s', '%s', '%s', '%s', '%s');`
|
`values ('%s', '%s', '%s', '%s', '%s', '%s') on conflict on constraint chain_economics_pk do ` +
|
||||||
|
`update set (circulating_fil, vested_fil, mined_fil, burnt_fil, locked_fil) = ('%[2]s', '%[3]s', '%[4]s', '%[5]s', '%[6]s') ` +
|
||||||
|
`where chain_economics.parent_state_root = '%[1]s';`
|
||||||
|
|
||||||
if _, err := s.db.Exec(fmt.Sprintf(ceInsert,
|
if _, err := s.db.Exec(fmt.Sprintf(ceInsert,
|
||||||
tipset.ParentState().String(),
|
tipset.ParentState().String(),
|
||||||
|
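The chain_economics change above turns a plain insert into an upsert keyed on the constraint named in the hunk, using fmt's indexed %[n]s verbs so one argument list fills both the values clause and the update clause. A reduced sketch follows, assuming a hypothetical two-column variant of the statement; note that because values are interpolated with Sprintf, a parameterized query would be the safer general-purpose form:

package syncer

import (
	"database/sql"
	"fmt"
)

// upsertEconomics sketches the on-conflict update above with only two
// columns; "chain_economics_pk" is the constraint named in the hunk, and
// the column set here is trimmed for illustration.
func upsertEconomics(db *sql.DB, parentStateRoot, circulatingFil string) error {
	stmt := fmt.Sprintf(
		`insert into chain_economics (parent_state_root, circulating_fil) `+
			`values ('%[1]s', '%[2]s') on conflict on constraint chain_economics_pk do `+
			`update set circulating_fil = '%[2]s' where chain_economics.parent_state_root = '%[1]s';`,
		parentStateRoot, circulatingFil,
	)
	_, err := db.Exec(stmt)
	return err
}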
34
cmd/lotus-chainwatch/util/api.go
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-jsonrpc"
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/api/client"
|
||||||
|
ma "github.com/multiformats/go-multiaddr"
|
||||||
|
manet "github.com/multiformats/go-multiaddr/net"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetFullNodeAPIUsingCredentials(ctx context.Context, listenAddr, token string) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||||
|
parsedAddr, err := ma.NewMultiaddr(listenAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, addr, err := manet.DialArgs(parsedAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.NewFullNodeRPC(ctx, apiURI(addr), apiHeaders(token))
|
||||||
|
}
|
||||||
|
func apiURI(addr string) string {
|
||||||
|
return "ws://" + addr + "/rpc/v0"
|
||||||
|
}
|
||||||
|
func apiHeaders(token string) http.Header {
|
||||||
|
headers := http.Header{}
|
||||||
|
headers.Add("Authorization", "Bearer "+token)
|
||||||
|
return headers
|
||||||
|
}
|
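The new util package above dials a full node over websocket JSON-RPC given a listen multiaddr and an auth token. A hedged usage sketch from the chainwatch side; the multiaddr and token values are placeholders:

package main

import (
	"context"
	"log"

	"github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)

func main() {
	connectExample(context.Background())
}

func connectExample(ctx context.Context) {
	// Placeholder credentials; in the run command these come from the
	// --api flag parsed as <token>:<maddr>.
	api, closer, err := util.GetFullNodeAPIUsingCredentials(ctx, "/ip4/127.0.0.1/tcp/1234", "PLACEHOLDER_TOKEN")
	if err != nil {
		log.Fatal(err)
	}
	defer closer()

	head, err := api.ChainHead(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("chain head height: %d", head.Height())
}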
@ -1,77 +1,27 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"html/template"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
rice "github.com/GeertJohan/go.rice"
|
rice "github.com/GeertJohan/go.rice"
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/libp2p/go-libp2p-core/peer"
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/actors"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var log = logging.Logger("main")
|
var log = logging.Logger("main")
|
||||||
|
|
||||||
var supportedSectors struct {
|
|
||||||
SectorSizes []struct {
|
|
||||||
Name string
|
|
||||||
Value uint64
|
|
||||||
Default bool
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
for supportedSector, _ := range miner.SupportedProofTypes {
|
|
||||||
sectorSize, err := supportedSector.SectorSize()
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
supportedSectors.SectorSizes = append(supportedSectors.SectorSizes, struct {
|
|
||||||
Name string
|
|
||||||
Value uint64
|
|
||||||
Default bool
|
|
||||||
}{
|
|
||||||
Name: sectorSize.ShortString(),
|
|
||||||
Value: uint64(sectorSize),
|
|
||||||
Default: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(supportedSectors.SectorSizes[:], func(i, j int) bool {
|
|
||||||
return supportedSectors.SectorSizes[i].Value < supportedSectors.SectorSizes[j].Value
|
|
||||||
})
|
|
||||||
|
|
||||||
supportedSectors.SectorSizes[0].Default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
logging.SetLogLevel("*", "INFO")
|
logging.SetLogLevel("*", "INFO")
|
||||||
|
|
||||||
@ -144,11 +94,6 @@ var runCmd = &cli.Command{
|
|||||||
return xerrors.Errorf("parsing source address (provide correct --from flag!): %w", err)
|
return xerrors.Errorf("parsing source address (provide correct --from flag!): %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
defaultMinerPeer, err := peer.Decode("12D3KooWJpBNhwgvoZ15EB1JwRTRpxgM9D2fwq6eEktrJJG74aP6")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
h := &handler{
|
h := &handler{
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
api: nodeApi,
|
api: nodeApi,
|
||||||
@ -162,23 +107,10 @@ var runCmd = &cli.Command{
|
|||||||
WalletRate: 15 * time.Minute,
|
WalletRate: 15 * time.Minute,
|
||||||
WalletBurst: 2,
|
WalletBurst: 2,
|
||||||
}),
|
}),
|
||||||
minerLimiter: NewLimiter(LimiterConfig{
|
|
||||||
TotalRate: 500 * time.Millisecond,
|
|
||||||
TotalBurst: build.BlockMessageLimit,
|
|
||||||
IPRate: 10 * time.Minute,
|
|
||||||
IPBurst: 2,
|
|
||||||
WalletRate: 1 * time.Hour,
|
|
||||||
WalletBurst: 2,
|
|
||||||
}),
|
|
||||||
defaultMinerPeer: defaultMinerPeer,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox()))
|
http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox()))
|
||||||
http.HandleFunc("/miner.html", h.minerhtml)
|
|
||||||
http.HandleFunc("/send", h.send)
|
http.HandleFunc("/send", h.send)
|
||||||
http.HandleFunc("/mkminer", h.mkminer)
|
|
||||||
http.HandleFunc("/msgwait", h.msgwait)
|
|
||||||
http.HandleFunc("/msgwaitaddr", h.msgwaitaddr)
|
|
||||||
|
|
||||||
fmt.Printf("Open http://%s\n", cctx.String("front"))
|
fmt.Printf("Open http://%s\n", cctx.String("front"))
|
||||||
|
|
||||||
@ -199,47 +131,12 @@ type handler struct {
|
|||||||
sendPerRequest types.FIL
|
sendPerRequest types.FIL
|
||||||
|
|
||||||
limiter *Limiter
|
limiter *Limiter
|
||||||
minerLimiter *Limiter
|
|
||||||
|
|
||||||
defaultMinerPeer peer.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) minerhtml(w http.ResponseWriter, r *http.Request) {
|
|
||||||
f, err := rice.MustFindBox("site").Open("_miner.html")
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(500)
|
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpl, err := ioutil.ReadAll(f)
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(500)
|
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var executedTmpl bytes.Buffer
|
|
||||||
|
|
||||||
t, err := template.New("miner.html").Parse(string(tmpl))
|
|
||||||
if err := t.Execute(&executedTmpl, supportedSectors); err != nil {
|
|
||||||
w.WriteHeader(500)
|
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := io.Copy(w, &executedTmpl); err != nil {
|
|
||||||
log.Errorf("failed to write template to string %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) send(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) send(w http.ResponseWriter, r *http.Request) {
|
||||||
to, err := address.NewFromString(r.FormValue("address"))
|
to, err := address.NewFromString(r.FormValue("address"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.WriteHeader(400)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -282,168 +179,9 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
|
|||||||
To: to,
|
To: to,
|
||||||
}, nil)
|
}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.WriteHeader(400)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = w.Write([]byte(smsg.Cid().String()))
|
_, _ = w.Write([]byte(smsg.Cid().String()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
|
|
||||||
owner, err := address.NewFromString(r.FormValue("address"))
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
_, _ = w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if owner.Protocol() != address.BLS {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
_, _ = w.Write([]byte("Miner address must use BLS. A BLS address starts with the prefix 't3'."))
|
|
||||||
_, _ = w.Write([]byte("Please create a BLS address by running \"lotus wallet new bls\" while connected to a Lotus node."))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ssize, err := strconv.ParseInt(r.FormValue("sectorSize"), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("%s: create actor start", owner)
|
|
||||||
|
|
||||||
// Limit based on wallet address
|
|
||||||
limiter := h.minerLimiter.GetWalletLimiter(owner.String())
|
|
||||||
if !limiter.Allow() {
|
|
||||||
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": wallet limit", http.StatusTooManyRequests)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit based on IP
|
|
||||||
reqIP := r.Header.Get("X-Real-IP")
|
|
||||||
if reqIP == "" {
|
|
||||||
h, _, err := net.SplitHostPort(r.RemoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("could not get ip from: %s, err: %s", r.RemoteAddr, err)
|
|
||||||
}
|
|
||||||
reqIP = h
|
|
||||||
}
|
|
||||||
limiter = h.minerLimiter.GetIPLimiter(reqIP)
|
|
||||||
if !limiter.Allow() {
|
|
||||||
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": IP limit", http.StatusTooManyRequests)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// General limiter owner allow throttling all messages that can make it into the mpool
|
|
||||||
if !h.minerLimiter.Allow() {
|
|
||||||
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": global limit", http.StatusTooManyRequests)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
smsg, err := h.api.MpoolPushMessage(h.ctx, &types.Message{
|
|
||||||
Value: types.BigInt(h.sendPerRequest),
|
|
||||||
From: h.from,
|
|
||||||
To: owner,
|
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte("pushfunds: " + err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Infof("%s: push funds %s", owner, smsg.Cid())
|
|
||||||
|
|
||||||
spt, err := ffiwrapper.SealProofTypeFromSectorSize(abi.SectorSize(ssize))
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte("sealprooftype: " + err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
params, err := actors.SerializeParams(&power.CreateMinerParams{
|
|
||||||
Owner: owner,
|
|
||||||
Worker: owner,
|
|
||||||
SealProofType: spt,
|
|
||||||
Peer: abi.PeerID(h.defaultMinerPeer),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
createStorageMinerMsg := &types.Message{
|
|
||||||
To: builtin.StoragePowerActorAddr,
|
|
||||||
From: h.from,
|
|
||||||
Value: big.Zero(),
|
|
||||||
|
|
||||||
Method: builtin.MethodsPower.CreateMiner,
|
|
||||||
Params: params,
|
|
||||||
}
|
|
||||||
|
|
||||||
signed, err := h.api.MpoolPushMessage(r.Context(), createStorageMinerMsg, nil)
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("%s: create miner msg: %s", owner, signed.Cid())
|
|
||||||
|
|
||||||
http.Redirect(w, r, fmt.Sprintf("/wait.html?f=%s&m=%s&o=%s", signed.Cid(), smsg.Cid(), owner), 303)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) msgwait(w http.ResponseWriter, r *http.Request) {
|
|
||||||
c, err := cid.Parse(r.FormValue("cid"))
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
mw, err := h.api.StateWaitMsg(r.Context(), c, build.MessageConfidence)
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if mw.Receipt.ExitCode != 0 {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode).Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.WriteHeader(200)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *handler) msgwaitaddr(w http.ResponseWriter, r *http.Request) {
|
|
||||||
c, err := cid.Parse(r.FormValue("cid"))
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
mw, err := h.api.StateWaitMsg(r.Context(), c, build.MessageConfidence)
|
|
||||||
if err != nil {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if mw.Receipt.ExitCode != 0 {
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode).Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w.WriteHeader(200)
|
|
||||||
|
|
||||||
var ma power.CreateMinerReturn
|
|
||||||
if err := ma.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil {
|
|
||||||
log.Errorf("%w", err)
|
|
||||||
w.WriteHeader(400)
|
|
||||||
w.Write([]byte(err.Error()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(w, "{\"addr\": \"%s\"}", ma.IDAddress)
|
|
||||||
}
|
|
||||||
|
@ -1,51 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>Creating Miner - Lotus Fountain</title>
|
|
||||||
<link rel="stylesheet" type="text/css" href="main.css">
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<div class="Index">
|
|
||||||
<div class="Index-nodes">
|
|
||||||
<div class="Index-node">
|
|
||||||
[CREATING MINER]
|
|
||||||
</div>
|
|
||||||
<div class="Index-node" id="formnd">
|
|
||||||
<form id="f" action='/mkminer' method='POST'>
|
|
||||||
<span>Enter owner/worker address:</span>
|
|
||||||
<input type='text' name='address' style="width: 300px" placeholder="t3...">
|
|
||||||
<select name="sectorSize">
|
|
||||||
{{range .SectorSizes}}
|
|
||||||
<option {{if .Default}}selected{{end}} value="{{ .Value }}">{{ .Name }}</option>
|
|
||||||
{{end}}
|
|
||||||
</select>
|
|
||||||
<button type='submit'>Create Miner</button>
|
|
||||||
</form>
|
|
||||||
</div>
|
|
||||||
<div id="plswait" style="display: none" class="Index-node">
|
|
||||||
<b>Waiting for transaction on chain..</b>
|
|
||||||
</div>
|
|
||||||
<div class="Index-node">
|
|
||||||
<span>When creating miner, DO NOT REFRESH THE PAGE, wait for it to load. This can take more than 5min.</span>
|
|
||||||
</div>
|
|
||||||
<div class="Index-node">
|
|
||||||
<span>If you don't have an owner/worker address, you can create it by following <a target="_blank" href="https://lotu.sh/en+mining#get-started-22083">these instructions</a>.</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div class="Index-footer">
|
|
||||||
<div>
|
|
||||||
<a href="index.html">[Back]</a>
|
|
||||||
<span style="float: right">Not dispensing real Filecoin tokens</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<script>
|
|
||||||
let f = document.getElementById('f')
|
|
||||||
f.onsubmit = ev => {
|
|
||||||
document.getElementById('plswait').style.display = 'block'
|
|
||||||
document.getElementById('formnd').style.display = 'none'
|
|
||||||
}
|
|
||||||
|
|
||||||
</script>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -13,9 +13,6 @@
|
|||||||
<div class="Index-node">
|
<div class="Index-node">
|
||||||
<a href="funds.html">[Send Funds]</a>
|
<a href="funds.html">[Send Funds]</a>
|
||||||
</div>
|
</div>
|
||||||
<div class="Index-node">
|
|
||||||
<a href="miner.html">[Create Miner]</a>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
<div class="Index-footer">
|
<div class="Index-footer">
|
||||||
<div>
|
<div>
|
||||||
|