Merge pull request #11569 from filecoin-project/release/v1.25.2
chore: merge `release/v1.25.2` into `releases`
commit f51f83bfec
@@ -1,7 +1,7 @@
version: 2.1
orbs:
-  aws-cli: circleci/aws-cli@1.3.2
-  docker: circleci/docker@2.1.4
+  aws-cli: circleci/aws-cli@4.1.1
+  docker: circleci/docker@2.3.0

executors:
  golang:
@@ -70,8 +70,6 @@ commands:
          name: Restore parameters cache
          keys:
            - 'v26-2k-lotus-params'
          paths:
            - /var/tmp/filecoin-proof-parameters/
      - run: ./lotus fetch-params 2048
      - save_cache:
          name: Save parameters cache
@@ -96,6 +94,7 @@ commands:
            git fetch --all
  install-ubuntu-deps:
    steps:
      - run: sudo apt install curl ca-certificates gnupg
      - run: sudo apt-get update
      - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
  check-go-version:
@@ -143,9 +142,9 @@ jobs:
        Run tests with gotestsum.
    working_directory: ~/lotus
    parameters: &test-params
      executor:
        type: executor
        default: golang
      resource_class:
        type: string
        default: medium+
      go-test-flags:
        type: string
        default: "-timeout 20m"
@@ -164,7 +163,14 @@ jobs:
        type: string
        default: unit
        description: Test suite name to report to CircleCI.
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:1.20
+        environment:
+          LOTUS_HARMONYDB_HOSTS: yugabyte
+      - image: yugabytedb/yugabyte:2.18.0.0-b65
+        command: bin/yugabyted start --daemon=false
+        name: yugabyte
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -182,6 +188,8 @@ jobs:
          command: |
            mkdir -p /tmp/test-reports/<< parameters.suite >>
            mkdir -p /tmp/test-artifacts
+           dockerize -wait tcp://yugabyte:5433 -timeout 3m
+           env
            gotestsum \
              --format standard-verbose \
              --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@@ -209,7 +217,9 @@ jobs:
          Branch on github.com/filecoin-project/test-vectors to checkout and
          test with. If empty (the default) the commit defined by the git
          submodule is used.
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:1.20
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -396,15 +406,14 @@ jobs:
      Run golangci-lint.
    working_directory: ~/lotus
    parameters:
      executor:
        type: executor
        default: golang
      args:
        type: string
        default: ''
        description: |
          Arguments to pass to golangci-lint
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:1.20
    resource_class: medium+
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -575,7 +584,7 @@ workflows:
            - build
          suite: itest-deals_concurrent
          target: "./itests/deals_concurrent_test.go"
-         executor: golang-2xl
+         resource_class: 2xlarge
      - test:
          name: test-itest-deals_invalid_utf8_label
          requires:
@@ -768,6 +777,18 @@ workflows:
            - build
          suite: itest-get_messages_in_ts
          target: "./itests/get_messages_in_ts_test.go"
+     - test:
+         name: test-itest-harmonydb
+         requires:
+           - build
+         suite: itest-harmonydb
+         target: "./itests/harmonydb_test.go"
+     - test:
+         name: test-itest-harmonytask
+         requires:
+           - build
+         suite: itest-harmonytask
+         target: "./itests/harmonytask_test.go"
      - test:
          name: test-itest-lite_migration
          requires:
@@ -976,14 +997,14 @@ workflows:
            - build
          suite: itest-wdpost_worker_config
          target: "./itests/wdpost_worker_config_test.go"
-         executor: golang-2xl
+         resource_class: 2xlarge
      - test:
          name: test-itest-worker
          requires:
            - build
          suite: itest-worker
          target: "./itests/worker_test.go"
-         executor: golang-2xl
+         resource_class: 2xlarge
      - test:
          name: test-itest-worker_upgrade
          requires:
@@ -996,32 +1017,28 @@ workflows:
            - build
          suite: utest-unit-cli
          target: "./cli/... ./cmd/... ./api/..."
+         resource_class: 2xlarge
          get-params: true
-         executor: golang-2xl
      - test:
          name: test-unit-node
          requires:
            - build
          suite: utest-unit-node
          target: "./node/..."
      - test:
          name: test-unit-rest
          requires:
            - build
          suite: utest-unit-rest
          target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
-         executor: golang-2xl
+         resource_class: 2xlarge
      - test:
          name: test-unit-storage
          requires:
            - build
          suite: utest-unit-storage
          target: "./storage/... ./extern/..."
+         get-params: true
      - test:
          go-test-flags: "-run=TestMulticoreSDR"
          requires:
@@ -10,11 +10,25 @@ import (
	"text/template"
)

+var GoVersion = "" // from init below. Ex: 1.19.7
+
//go:generate go run ./gen.go ..

//go:embed template.yml
var templateFile embed.FS

+func init() {
+	b, err := os.ReadFile("../go.mod")
+	if err != nil {
+		panic("cannot find go.mod in parent folder")
+	}
+	for _, line := range strings.Split(string(b), "\n") {
+		if strings.HasPrefix(line, "go ") {
+			GoVersion = line[3:]
+		}
+	}
+}
+
type (
	dirs  = []string
	suite = string
@@ -111,6 +125,7 @@ func main() {
		Networks   []string
		ItestFiles []string
		UnitSuites map[string]string
+		GoVersion  string
	}
	in := data{
		Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"},
@@ -125,6 +140,7 @@ func main() {
			}
			return ret
		}(),
+		GoVersion: GoVersion,
	}

	out, err := os.Create("./config.yml")
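For context, a minimal, self-contained sketch of the parsing rule the `init` hook above applies to `go.mod` (the example file contents here are an assumption; the rule itself is exactly what the diff adds: the last line starting with `go ` wins, since the loop does not break):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed go.mod contents for illustration.
	gomod := "module github.com/filecoin-project/lotus\n\ngo 1.20\n"
	goVersion := ""
	for _, line := range strings.Split(gomod, "\n") {
		if strings.HasPrefix(line, "go ") {
			goVersion = line[3:] // everything after "go "
		}
	}
	fmt.Println(goVersion) // prints: 1.20
}
```

The extracted version then flows into the `data` struct and, via the `[[ .GoVersion]]` placeholder, into the generated CircleCI config, so CI always tests against the module's declared Go version.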
@@ -1,7 +1,7 @@
version: 2.1
orbs:
-  aws-cli: circleci/aws-cli@1.3.2
-  docker: circleci/docker@2.1.4
+  aws-cli: circleci/aws-cli@4.1.1
+  docker: circleci/docker@2.3.0

executors:
  golang:
@@ -70,8 +70,6 @@ commands:
          name: Restore parameters cache
          keys:
            - 'v26-2k-lotus-params'
          paths:
            - /var/tmp/filecoin-proof-parameters/
      - run: ./lotus fetch-params 2048
      - save_cache:
          name: Save parameters cache
@@ -96,6 +94,7 @@ commands:
            git fetch --all
  install-ubuntu-deps:
    steps:
      - run: sudo apt install curl ca-certificates gnupg
      - run: sudo apt-get update
      - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
  check-go-version:
@@ -143,9 +142,9 @@ jobs:
        Run tests with gotestsum.
    working_directory: ~/lotus
    parameters: &test-params
      executor:
        type: executor
        default: golang
      resource_class:
        type: string
        default: medium+
      go-test-flags:
        type: string
        default: "-timeout 20m"
@@ -164,7 +163,14 @@ jobs:
        type: string
        default: unit
        description: Test suite name to report to CircleCI.
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:[[ .GoVersion]]
+        environment:
+          LOTUS_HARMONYDB_HOSTS: yugabyte
+      - image: yugabytedb/yugabyte:2.18.0.0-b65
+        command: bin/yugabyted start --daemon=false
+        name: yugabyte
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -182,6 +188,8 @@ jobs:
          command: |
            mkdir -p /tmp/test-reports/<< parameters.suite >>
            mkdir -p /tmp/test-artifacts
+           dockerize -wait tcp://yugabyte:5433 -timeout 3m
+           env
            gotestsum \
              --format standard-verbose \
              --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@@ -209,7 +217,9 @@ jobs:
          Branch on github.com/filecoin-project/test-vectors to checkout and
          test with. If empty (the default) the commit defined by the git
          submodule is used.
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:[[ .GoVersion]]
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -396,15 +406,14 @@ jobs:
      Run golangci-lint.
    working_directory: ~/lotus
    parameters:
      executor:
        type: executor
        default: golang
      args:
        type: string
        default: ''
        description: |
          Arguments to pass to golangci-lint
-    executor: << parameters.executor >>
+    docker:
+      - image: cimg/go:[[ .GoVersion]]
    resource_class: medium+
    steps:
      - install-ubuntu-deps
      - attach_workspace:
@@ -543,7 +552,7 @@ workflows:
          suite: itest-[[ $name ]]
          target: "./itests/[[ $file ]]"
          [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
-         executor: golang-2xl
+         resource_class: 2xlarge
          [[- end]]
          [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
          get-params: true
@@ -557,9 +566,16 @@ workflows:
            - build
          suite: utest-[[ $suite ]]
          target: "[[ $pkgs ]]"
-         [[if eq $suite "unit-cli"]]get-params: true[[end]]
-         [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
-         [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
+         [[- if eq $suite "unit-storage"]]
+         get-params: true
+         [[- end -]]
+         [[- if eq $suite "unit-cli"]]
+         resource_class: 2xlarge
+         get-params: true
+         [[- end -]]
+         [[- if eq $suite "unit-rest"]]
+         resource_class: 2xlarge
+         [[- end -]]
        [[- end]]
      - test:
          go-test-flags: "-run=TestMulticoreSDR"
.gitignore (vendored, 2 changes)
@@ -6,6 +6,7 @@
/lotus-chainwatch
/lotus-shed
/lotus-sim
+/lotus-provider
/lotus-townhall
/lotus-fountain
/lotus-stats
@@ -41,6 +42,7 @@ build/paramfetch.sh
bin/ipget
bin/tmp/*
.idea
+.vscode
scratchpad

build/builtin-actors/v*
@@ -14,6 +14,7 @@ linters:
  - varcheck
  - deadcode
  - scopelint
  - unused

# We don't want to skip builtin/
skip-dirs-use-default: false
CHANGELOG.md (193 changes)
@@ -1,5 +1,192 @@
# Lotus changelog

# UNRELEASED

## Improvements

# v1.25.2 / 2024-01-11

This is an optional but **highly recommended feature release** of Lotus, as it includes fixes for synchronization issues that users have experienced. The feature release also introduces `Lotus-Provider` in its alpha testing phase, as well as the ability to call external PC2 binaries during the sealing process.

## ☢️ Upgrade Warnings ☢️

There are no upgrade warnings for this feature release.

## ⭐️ Highlights ⭐️

### Lotus-Provider
This feature release ships the alpha release of the new Lotus-Provider binary, together with its initial features: high availability of WindowPoSt and WinningPoSt.

What makes Lotus-Provider exciting:

**High Availability**
- You can run as many `Lotus-Provider` instances as you want for both WindowPoSt and WinningPoSt.
- You can connect them to as many clustered YugabyteDB instances as you want. This allows for an NxN configuration where all instances can communicate with all others.
- You have the option to connect different instances to different chain daemons.

**Simplicity**
- Once the configuration is in the database, setting up a new machine with Lotus-Provider is straightforward. Simply start the binary with the correct flags to find YugabyteDB and specify which configuration layers it should use.

**Durability**
- `Lotus-Provider` is designed with robustness in mind. Updates to the system are handled seamlessly, ensuring that performance and stability are maintained when taking down machines in your cluster for updates.

Read more about [`Lotus-Provider` in the documentation here](https://lotus.filecoin.io/storage-providers/lotus-provider/overview/), and check out how you can migrate from [Lotus-Miner to Lotus-Provider here](https://lotus.filecoin.io/storage-providers/lotus-provider/setup/). **(Only recommended in testnets while it is in alpha.)**

### External PC2 binaries

In this feature release, storage providers can call external PC2 binaries during the sealing process. This allows storage providers to leverage the SupraSeal PC2 binary, which has been shown to improve sealing speed in the PC2 phase. For instance, our current benchmarks show that an NVIDIA RTX A5000 card was able to complete PC2 in approximately 2.5 minutes.

We have verified that SupraSeal PC2 functions properly with Committed Capacity (CC) sectors, both Synthetic PoReps and non-Synthetic PoReps. However, calling SupraSeal PC2 with deal sectors is not supported in this feature release.

For more information on how to use SupraSeal PC2 with your `lotus-worker`, as well as how to use the feature, please [refer to the documentation](https://lotus.filecoin.io/tutorials/lotus-miner/supra-seal-pc2/).

## New features
- feat: sturdypost work branch ([filecoin-project/lotus#11405](https://github.com/filecoin-project/lotus/pull/11405))
  - Adds the `Lotus-Provider` binary and the HarmonyDB framework.
- feat: worker: Support delegating precommit2 to external binary ([filecoin-project/lotus#11185](https://github.com/filecoin-project/lotus/pull/11185))
  - Allows delegating PreCommit2 to an external binary.
- feat: build: Add SupraSeal-PC2 binary script ([filecoin-project/lotus#11430](https://github.com/filecoin-project/lotus/pull/11430))
  - Adds a script for building the SupraSeal-PC2 binary easily.
- Feat: daemon: Auto remove existing chain if importing chain file or snapshot ([filecoin-project/lotus#11277](https://github.com/filecoin-project/lotus/pull/11277))
  - Automatically removes the existing chain when importing a snapshot.
- feat: Add ETA to lotus sync wait (#11211) ([filecoin-project/lotus#11211](https://github.com/filecoin-project/lotus/pull/11211))
  - Adds an ETA indicator to `lotus sync wait`, giving an estimate of how long until sync completes.
- feat: mpool/wdpost: Maximize feecap config ([filecoin-project/lotus#9746](https://github.com/filecoin-project/lotus/pull/9746))
  - Adds a MaximizeFeeCap config option.
- feat: Add lotus-bench cli option to stress test any binary ([filecoin-project/lotus#11270](https://github.com/filecoin-project/lotus/pull/11270))
  - Enables `lotus-bench` to run any binary, analyze its latency distribution and histogram, track the most common errors, and stress-test it under different concurrency levels and QPS.
- feat: chain import: don't walk to genesis - 2-3x faster snapshot import (#11446) ([filecoin-project/lotus#11446](https://github.com/filecoin-project/lotus/pull/11446))
  - Improves snapshot import speed by not walking back to genesis on import.
- feat: metric: export Mpool message count ([filecoin-project/lotus#11361](https://github.com/filecoin-project/lotus/pull/11361))
  - Adds the mpool message count as a Prometheus metric.
- feat: bench: flag to output GenerateWinningPoStWithVanilla params ([filecoin-project/lotus#11460](https://github.com/filecoin-project/lotus/pull/11460))

## Improvements
- feat: bootstrap: add glif bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175))
- fix: bench: Set ticket and seed to a non-all zero value ([filecoin-project/lotus#11429](https://github.com/filecoin-project/lotus/pull/11429))
- fix: alert: Check UDPbuffer-size ([filecoin-project/lotus#11360](https://github.com/filecoin-project/lotus/pull/11360))
- feat: cli: sort actor CIDs alphabetically before printing (#11345) ([filecoin-project/lotus#11345](https://github.com/filecoin-project/lotus/pull/11345))
- fix: worker: Connect when --listen is not set ([filecoin-project/lotus#11294](https://github.com/filecoin-project/lotus/pull/11294))
- fix: miner info: Show correct sector state counts ([filecoin-project/lotus#11456](https://github.com/filecoin-project/lotus/pull/11456))
- feat: miner: defensive check for equivocation ([filecoin-project/lotus#11321](https://github.com/filecoin-project/lotus/pull/11321))
- feat: Instructions for setting up Grafana/Prometheus for monitoring local lotus node ([filecoin-project/lotus#11276](https://github.com/filecoin-project/lotus/pull/11276))
- fix: cli: Wrap error in wallet sign ([filecoin-project/lotus#11273](https://github.com/filecoin-project/lotus/pull/11273))
- fix: Add time slicing to splitstore purging to reduce lock congestion ([filecoin-project/lotus#11269](https://github.com/filecoin-project/lotus/pull/11269))
- feat: sealing: load SectorsSummary from sealing SectorStats instead of calling API each time ([filecoin-project/lotus#11353](https://github.com/filecoin-project/lotus/pull/11353))
- fix: shed: additional metrics in `mpool miner-select-messages` ([filecoin-project/lotus#11253](https://github.com/filecoin-project/lotus/pull/11253))
- storage: Return soft err when sector alloc fails in acquire ([filecoin-project/lotus#11338](https://github.com/filecoin-project/lotus/pull/11338))
- feat: miner: log detailed timing breakdown when mining takes longer than the block's timestamp ([filecoin-project/lotus#11228](https://github.com/filecoin-project/lotus/pull/11228))
- fix: shed: make invariants checker work with splitstore ([filecoin-project/lotus#11391](https://github.com/filecoin-project/lotus/pull/11391))
- feat: eth: encode eth tx input as solidity ABI (#11402) ([filecoin-project/lotus#11402](https://github.com/filecoin-project/lotus/pull/11402))
- fix: eth: use the correct state-tree when resolving addresses (#11387) ([filecoin-project/lotus#11387](https://github.com/filecoin-project/lotus/pull/11387))
- fix: eth: remove trace sanity check (#11385) ([filecoin-project/lotus#11385](https://github.com/filecoin-project/lotus/pull/11385))
- fix: chain: make failure to load the chain state fatal (#11426) ([filecoin-project/lotus#11426](https://github.com/filecoin-project/lotus/pull/11426))
- fix: build: an epoch is near an upgrade iff the upgrade is enabled (#11401) ([filecoin-project/lotus#11401](https://github.com/filecoin-project/lotus/pull/11401))
- fix: eth: handle unresolvable addresses (#11433) ([filecoin-project/lotus#11433](https://github.com/filecoin-project/lotus/pull/11433))
- fix: eth: correctly encode and simplify native input/output encoding (#11382) ([filecoin-project/lotus#11382](https://github.com/filecoin-project/lotus/pull/11382))
- fix: worker: listen for interrupt signals in GetStorageMinerAPI loop (#11309) ([filecoin-project/lotus#11309](https://github.com/filecoin-project/lotus/pull/11309))
- fix: sync: iterate over returned messages directly (#11373) ([filecoin-project/lotus#11373](https://github.com/filecoin-project/lotus/pull/11373))
- fix: miner: correct duration logs in mineOne ([filecoin-project/lotus#11241](https://github.com/filecoin-project/lotus/pull/11241))
- fix: cli: Add print to unseal cmd ([filecoin-project/lotus#11271](https://github.com/filecoin-project/lotus/pull/11271))
- fix: networking: avoid dialing when trying to handshake peers ([filecoin-project/lotus#11262](https://github.com/filecoin-project/lotus/pull/11262))
- metric milliseconds computation with golang original method (#11403) ([filecoin-project/lotus#11403](https://github.com/filecoin-project/lotus/pull/11403))
- feat: shed: fix blockstore prune (#11197) ([filecoin-project/lotus#11197](https://github.com/filecoin-project/lotus/pull/11197))
- refactor: ffi: replace ClearLayerData with ClearCache (#11352) ([filecoin-project/lotus#11352](https://github.com/filecoin-project/lotus/pull/11352))
- fix: api: compute gasUsedRatio based on max gas in the tipset (#11354) ([filecoin-project/lotus#11354](https://github.com/filecoin-project/lotus/pull/11354))
- fix: api: compute the effective gas cost with the correct base-fee (#11357) ([filecoin-project/lotus#11357](https://github.com/filecoin-project/lotus/pull/11357))
- fix: api: return errors on failure to lookup an eth txn receipt (#11329) ([filecoin-project/lotus#11329](https://github.com/filecoin-project/lotus/pull/11329))
- fix: api: exclude reverted events in `eth_getLogs` results (#11318) ([filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318))
- api: Add block param to eth_estimateGas ([filecoin-project/lotus#11462](https://github.com/filecoin-project/lotus/pull/11462))
- opt: fix duplicate check exitcode ([filecoin-project/lotus#11171](https://github.com/filecoin-project/lotus/pull/11171))
- fix: lotus-provider: show addresses in log ([filecoin-project/lotus#11490](https://github.com/filecoin-project/lotus/pull/11490))
- fix: lotus-provider: Wait for the correct taskID ([filecoin-project/lotus#11493](https://github.com/filecoin-project/lotus/pull/11493))
- harmony: Fix task reclaim on restart ([filecoin-project/lotus#11498](https://github.com/filecoin-project/lotus/pull/11498))
- fix: lotus-provider: Fix log output format in wdPostTaskCmd ([filecoin-project/lotus#11504](https://github.com/filecoin-project/lotus/pull/11504))
- fix: lp docsgen ([filecoin-project/lotus#11488](https://github.com/filecoin-project/lotus/pull/11488))
- fix: lotus-provider do not suggest default layer ([filecoin-project/lotus#11486](https://github.com/filecoin-project/lotus/pull/11486))
- feat: syncer: optimize syncFork for one-epoch forks ([filecoin-project/lotus#11533](https://github.com/filecoin-project/lotus/pull/11533))
- fix: sync: do not include incoming in return of syncFork ([filecoin-project/lotus#11541](https://github.com/filecoin-project/lotus/pull/11541))
- fix: wdpost: fix vanilla proof indexes ([filecoin-project/lotus#11550](https://github.com/filecoin-project/lotus/pull/11550))
- feat: exchange: change GetBlocks to always fetch the requested number of tipsets ([filecoin-project/lotus#11565](https://github.com/filecoin-project/lotus/pull/11565))

## Dependencies
- update go-libp2p to v0.31.0 ([filecoin-project/lotus#11225](https://github.com/filecoin-project/lotus/pull/11225))
- deps: gostatetype (#11437) ([filecoin-project/lotus#11437](https://github.com/filecoin-project/lotus/pull/11437))
- fix: deps: stop using go-libp2p deprecated peer.ID.Pretty ([filecoin-project/lotus#11263](https://github.com/filecoin-project/lotus/pull/11263))
- chore: libp2p: update libp2p deps in release-v1.25.2 to v0.31.1 ([filecoin-project/lotus#11524](https://github.com/filecoin-project/lotus/pull/11524))
- deps: update go-multiaddr to v0.12.0 ([filecoin-project/lotus#11558](https://github.com/filecoin-project/lotus/pull/11558))
- dep: go-multi-address to v0.12.1 ([filecoin-project/lotus#11564](https://github.com/filecoin-project/lotus/pull/11564))

## Others
- chore: update FFI (#11431) ([filecoin-project/lotus#11431](https://github.com/filecoin-project/lotus/pull/11431))
- chore: build: bump master to v1.25.1-dev ([filecoin-project/lotus#11450](https://github.com/filecoin-project/lotus/pull/11450))
- chore: releases: merge releases into master ([filecoin-project/lotus#11448](https://github.com/filecoin-project/lotus/pull/11448))
- chore: actors: update v12 to the final release ([filecoin-project/lotus#11440](https://github.com/filecoin-project/lotus/pull/11440))
- chore: Remove ipfs main bootstrap nodes (#11200) ([filecoin-project/lotus#11200](https://github.com/filecoin-project/lotus/pull/11200))
- Remove PL's european bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11315](https://github.com/filecoin-project/lotus/pull/11315))
- chore: deps: update to go-state-types v0.12.7 ([filecoin-project/lotus#11428](https://github.com/filecoin-project/lotus/pull/11428))
- fix: Add .vscode to gitignore ([filecoin-project/lotus#11275](https://github.com/filecoin-project/lotus/pull/11275))
- fix: test: temporarily exempt SynthPorep constants from test ([filecoin-project/lotus#11259](https://github.com/filecoin-project/lotus/pull/11259))
- feat: skip TestSealAndVerify3 until it's fixed ([filecoin-project/lotus#11230](https://github.com/filecoin-project/lotus/pull/11230))
- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#11250](https://github.com/filecoin-project/lotus/pull/11250))
- fix: config: Update ColdStoreType comments ([filecoin-project/lotus#11274](https://github.com/filecoin-project/lotus/pull/11274))
- readme: bump up golang version (#11347) ([filecoin-project/lotus#11347](https://github.com/filecoin-project/lotus/pull/11347))
- chore: watermelon: upgrade epoch ([filecoin-project/lotus#11374](https://github.com/filecoin-project/lotus/pull/11374))
- add support for v12 check invariants and also a default case to reduce future confusion (#11371) ([filecoin-project/lotus#11371](https://github.com/filecoin-project/lotus/pull/11371))
- test: drand: switch tests to drand testnet (from devnet) (#11359) ([filecoin-project/lotus#11359](https://github.com/filecoin-project/lotus/pull/11359))
- feat: chain: light-weight patch to fix calibrationnet again by removing move_partitions from built-in actors (#11409) ([filecoin-project/lotus#11409](https://github.com/filecoin-project/lotus/pull/11409))
- chore: cli: Revert move-partitions cmd ([filecoin-project/lotus#11408](https://github.com/filecoin-project/lotus/pull/11408))
- chore: forward-port calibnet hotfix to master ([filecoin-project/lotus#11407](https://github.com/filecoin-project/lotus/pull/11407))
- fix: migration: set premigration to 90 minutes ([filecoin-project/lotus#11395](https://github.com/filecoin-project/lotus/pull/11395))
- feat: chain: light-weight patch to fix calibrationnet (#11363) ([filecoin-project/lotus#11363](https://github.com/filecoin-project/lotus/pull/11363))
- chore: merge feat/nv21 into master ([filecoin-project/lotus#11336](https://github.com/filecoin-project/lotus/pull/11336))
- docs: Link the release section in the release flow doc ([filecoin-project/lotus#11299](https://github.com/filecoin-project/lotus/pull/11299))
- fix: ci: fetch params for the storage unit tests ([filecoin-project/lotus#11441](https://github.com/filecoin-project/lotus/pull/11441))
- Update mainnet.pi ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288))
- add go linter - "unused" (#11235) ([filecoin-project/lotus#11235](https://github.com/filecoin-project/lotus/pull/11235))
- Fix/texts (#11298) ([filecoin-project/lotus#11298](https://github.com/filecoin-project/lotus/pull/11298))
- fix typo in rate-limit flag description (#11316) ([filecoin-project/lotus#11316](https://github.com/filecoin-project/lotus/pull/11316))
- eth_filter flake debug ([filecoin-project/lotus#11261](https://github.com/filecoin-project/lotus/pull/11261))
- fix: sealing: typo in FinalizeReplicaUpdate ([filecoin-project/lotus#11255](https://github.com/filecoin-project/lotus/pull/11255))
- chore: slice loop replace (#11349) ([filecoin-project/lotus#11349](https://github.com/filecoin-project/lotus/pull/11349))
- backport: docker build fix for v1.25.2 ([filecoin-project/lotus#11560](https://github.com/filecoin-project/lotus/pull/11560))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Andrew Jackson (Ajax) | 161 | +24328/-12464 | 4148 |
| Łukasz Magiera | 99 | +5238/-2690 | 260 |
| Shrenuj Bansal | 27 | +3402/-1265 | 76 |
| Fridrik Asmundsson | 15 | +1148/-307 | 58 |
| Steven Allen | 15 | +674/-337 | 35 |
| Ian Norden | 1 | +625/-3 | 4 |
| Aarsh Shah | 4 | +227/-167 | 14 |
| Phi | 19 | +190/-183 | 32 |
| Aayush Rajasekaran | 3 | +291/-56 | 16 |
| Mikers | 2 | +76/-262 | 19 |
| Aayush | 14 | +111/-59 | 21 |
| Friðrik Ásmundsson | 1 | +101/-1 | 2 |
| Alejandro Criado-Pérez | 1 | +36/-36 | 27 |
| Jie Hou | 5 | +36/-10 | 5 |
| Florian RUEN | 2 | +24/-19 | 5 |
| Phi-rjan | 3 | +20/-8 | 3 |
| Icarus9913 | 1 | +11/-11 | 6 |
| Jiaying Wang | 3 | +8/-7 | 5 |
| guangwu | 1 | +3/-10 | 2 |
| Marten Seemann | 1 | +6/-6 | 2 |
| simlecode | 1 | +0/-6 | 2 |
| GlacierWalrus | 2 | +0/-5 | 2 |
| Anton Evangelatov | 1 | +2/-2 | 1 |
| Ales Dumikau | 3 | +2/-2 | 3 |
| renran | 1 | +2/-1 | 1 |
| Volker Mische | 1 | +1/-1 | 1 |
| Icarus Wu | 1 | +1/-1 | 1 |
| Hubert | 1 | +1/-1 | 1 |
| Aloxaf | 1 | +1/-1 | 1 |
| Alejandro | 1 | +1/-1 | 1 |
| lazavikmaria | 1 | +1/-0 | 1 |

# v1.25.1 / 2023-12-09

This is a **highly recommended PATCH RELEASE.** The patch release fixes the issue where node operators trying to catch up sync were unable to sync large message blocks/epochs due to an increased number of messages on the network.
@@ -9,6 +196,7 @@ This patch release allows for up to 10k messages per block. Additionally, it int
## Improvements
- fix: exchange: allow up to 10k messages per block ([filecoin-project/lotus#11506](https://github.com/filecoin-project/lotus/pull/11506))

+>>>>>>> releases

# v1.25.0 / 2023-11-22

@@ -94,6 +282,9 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization
- fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214))
- fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224))
- chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350))
+- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480))
+- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11483](https://github.com/filecoin-project/lotus/pull/11483))
+- chore: fix: sql Scan cannot write to an object ([filecoin-project/lotus#11487](https://github.com/filecoin-project/lotus/pull/11487))

## Dependencies
- deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998))
@@ -255,7 +446,7 @@ This feature release requires a **minimum Go version of v1.19.12 or higher to su
- feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797))
- feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929))

-## Improvements && Bug Fixe
+## Improvements && Bug Fixes
- chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040))
- feat: Make sure we don't store duplicate actor events caused by reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015))
- sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002))
@@ -109,6 +109,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
+COPY --from=lotus-builder /opt/filecoin/lotus-provider /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/

@@ -117,11 +118,13 @@ RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
+RUN mkdir /var/lib/lotus-provider
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
+RUN chown fc: /var/lib/lotus-provider


VOLUME /var/tmp/filecoin-proof-parameters
@@ -129,6 +132,7 @@ VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
+VOLUME /var/lib/lotus-provider

EXPOSE 1234
EXPOSE 2345
@@ -73,7 +73,7 @@ All releases under an odd minor version number indicate **feature releases**. Th

Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number.

-We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link).
+We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the [Release Cycle section](#release-cycle).

### Example Scenarios
Makefile (48 changes)
@@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps

-build-devnets: build lotus-seed lotus-shed
+build-devnets: build lotus-seed lotus-shed lotus-provider
.PHONY: build-devnets

debug: GOFLAGS+=-tags=debug
@@ -97,6 +97,15 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner
BINS+=lotus-miner

+lotus-provider: $(BUILD_DEPS)
+	rm -f lotus-provider
+	$(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider
+.PHONY: lotus-provider
+BINS+=lotus-provider
+
+lp2k: GOFLAGS+=-tags=2k
+lp2k: lotus-provider
+
lotus-worker: $(BUILD_DEPS)
	rm -f lotus-worker
	$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
@@ -121,7 +130,7 @@ an existing lotus binary in your PATH. This may cause problems if you don't run

.PHONY: build

-install: install-daemon install-miner install-worker
+install: install-daemon install-miner install-worker install-provider

install-daemon:
	install -C ./lotus /usr/local/bin/lotus
@@ -129,6 +138,9 @@ install-daemon:
install-miner:
	install -C ./lotus-miner /usr/local/bin/lotus-miner

+install-provider:
+	install -C ./lotus-provider /usr/local/bin/lotus-provider
+
install-worker:
	install -C ./lotus-worker /usr/local/bin/lotus-worker

@@ -144,6 +156,9 @@ uninstall-daemon:
uninstall-miner:
	rm -f /usr/local/bin/lotus-miner

+uninstall-provider:
+	rm -f /usr/local/bin/lotus-provider
+
uninstall-worker:
	rm -f /usr/local/bin/lotus-worker

@@ -241,6 +256,14 @@ install-miner-service: install-miner install-daemon-service
	@echo
	@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."

+install-provider-service: install-provider install-daemon-service
+	mkdir -p /etc/systemd/system
+	mkdir -p /var/log/lotus
+	install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service
+	systemctl daemon-reload
+	@echo
+	@echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup."
+
install-main-services: install-miner-service

install-all-services: install-main-services
@@ -259,6 +282,12 @@ clean-miner-service:
	rm -f /etc/systemd/system/lotus-miner.service
	systemctl daemon-reload

+clean-provider-service:
+	-systemctl stop lotus-provider
+	-systemctl disable lotus-provider
+	rm -f /etc/systemd/system/lotus-provider.service
+	systemctl daemon-reload
+
clean-main-services: clean-daemon-service

clean-all-services: clean-main-services
@@ -294,7 +323,8 @@ actors-code-gen:
	$(GOCC) run ./chain/actors/agen
	$(GOCC) fmt ./...

-actors-gen: actors-code-gen fiximports
+actors-gen: actors-code-gen
+	./scripts/fiximports
.PHONY: actors-gen

bundle-gen:
@@ -328,7 +358,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen
	$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd

-docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
+docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider

docsgen-md-full: docsgen-md-bin
	./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@@ -337,6 +367,8 @@ docsgen-md-storage: docsgen-md-bin
	./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
	./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
+docsgen-md-provider: docsgen-md-bin
+	./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md

docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway

@@ -354,21 +386,23 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
fiximports:
	./scripts/fiximports

-gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
+gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci
+	./scripts/fiximports
	@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen

jen: gen

-snap: lotus lotus-miner lotus-worker
+snap: lotus lotus-miner lotus-worker lotus-provider
	snapcraft
	# snapcraft upload ./lotus_*.snap

# separate from gen because it needs binaries
-docsgen-cli: lotus lotus-miner lotus-worker
+docsgen-cli: lotus lotus-miner lotus-worker lotus-provider
	python3 ./scripts/generate-lotus-cli.py
	./lotus config default > documentation/en/default-lotus-config.toml
	./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
+	./lotus-provider config default > documentation/en/default-lotus-provider-config.toml
.PHONY: docsgen-cli

print-%:
@@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l

#### Go

-To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):
+To build Lotus, you need a working installation of [Go 1.20.10 or higher](https://golang.org/dl/):

```bash
-wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+wget -c https://golang.org/dl/go1.20.10.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```

**TIP:**
@@ -133,6 +133,8 @@ Note: The default branch `master` is the dev branch where the latest new feature

6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain).

+7. (Optional) Follow the [Setting Up Prometheus and Grafana](https://github.com/filecoin-project/lotus/tree/master/metrics/README.md) guide for detailed instructions on setting up a working monitoring system running against a local running lotus node.
+
## License

Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)
@@ -824,7 +824,7 @@ type FullNode interface {
	EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read

	EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
-	EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read
+	EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) //perm:read
	EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read

	EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
@@ -114,7 +114,7 @@ type Gateway interface {
	EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error)
	EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
	EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
-	EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
+	EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error)
	EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
	EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
	EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
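The switch from a typed `ethtypes.EthCall` parameter to `jsonrpc.RawParams` is what enables the "Add block param to eth_estimateGas" changelog entry: a fixed signature cannot accept an optional trailing argument, but raw params can be a 1- or 2-element array. A minimal standalone sketch of that decoding pattern (the `estimateGasParams` type below is a simplified stand-in, not the lotus type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// estimateGasParams sketches (hypothetically) how a raw positional-params
// payload for eth_estimateGas can hold a required call object plus an
// optional block parameter.
type estimateGasParams struct {
	Tx       json.RawMessage // the call to estimate gas for
	BlkParam *string         // optional; pins the state the estimate runs on
}

// UnmarshalJSON accepts a 1- or 2-element positional params array.
func (p *estimateGasParams) UnmarshalJSON(b []byte) error {
	var arr []json.RawMessage
	if err := json.Unmarshal(b, &arr); err != nil {
		return err
	}
	switch len(arr) {
	case 2:
		if err := json.Unmarshal(arr[1], &p.BlkParam); err != nil {
			return err
		}
		fallthrough
	case 1:
		p.Tx = arr[0]
		return nil
	default:
		return fmt.Errorf("expected 1 or 2 params, got %d", len(arr))
	}
}

func main() {
	var p estimateGasParams
	_ = json.Unmarshal([]byte(`[{"to":"0x0"},"latest"]`), &p)
	fmt.Println(string(p.Tx), *p.BlkParam) // {"to":"0x0"} latest
}
```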
api/api_lp.go (new file, 10 lines)
@@ -0,0 +1,10 @@
+package api
+
+import "context"
+
+type LotusProvider interface {
+	Version(context.Context) (Version, error) //perm:admin
+
+	// Trigger shutdown
+	Shutdown(context.Context) error //perm:admin
+}
@@ -15,6 +15,16 @@ import (
	"github.com/filecoin-project/lotus/lib/rpcenc"
)

+// NewProviderRpc creates a new http jsonrpc client.
+func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) {
+	var res v1api.LotusProviderStruct
+
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+		api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
+
+	return &res, closer, err
+}
+
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
	var res v0api.CommonNetStruct
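Putting the new pieces together, a minimal client sketch against the provider API (the endpoint address and token handling are assumptions; only `NewProviderRpc`, `Version`, and `Shutdown` come from this diff):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Assumed endpoint; a real deployment would read the listen address and
	// auth token from the lotus-provider configuration.
	addr := "ws://127.0.0.1:12300/rpc/v0"
	header := http.Header{} // plus an Authorization header in practice

	provider, closer, err := client.NewProviderRpc(ctx, addr, header)
	if err != nil {
		panic(err)
	}
	defer closer()

	v, err := provider.Version(ctx) // admin-gated, see //perm:admin in api_lp.go
	if err != nil {
		panic(err)
	}
	fmt.Println("lotus-provider API version:", v)
}
```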
@@ -432,6 +432,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
		i = &api.GatewayStruct{}
		t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
		permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
+	case "Provider":
+		i = &api.LotusProviderStruct{}
+		t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem()
+		permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal))
	default:
		panic("unknown type")
	}
@@ -1042,7 +1042,7 @@ func (mr *MockFullNodeMockRecorder) EthChainId(arg0 interface{}) *gomock.Call {
}

// EthEstimateGas mocks base method.
-func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
+func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 jsonrpc.RawParams) (ethtypes.EthUint64, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "EthEstimateGas", arg0, arg1)
	ret0, _ := ret[0].(ethtypes.EthUint64)
@@ -41,6 +41,12 @@ func PermissionedWorkerAPI(a Worker) Worker {
	return &out
}

+func PermissionedAPI[T, P any](a T) *P {
+	var out P
+	permissionedProxies(a, &out)
+	return &out
+}
+
func PermissionedWalletAPI(a Wallet) Wallet {
	var out WalletStruct
	permissionedProxies(a, &out)
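The new generic helper replaces the need for a hand-written wrapper per API: `T` is the interface implementation and `P` is the generated proxy struct whose `Internal` function fields get wrapped with permission checks. A usage sketch (the provider instantiation is an assumption about how the generic is meant to be called, based on the types added elsewhere in this diff):

```go
package example // assumes the filecoin-project/lotus module is available

import "github.com/filecoin-project/lotus/api"

// WrapProvider gates every LotusProvider method behind its //perm tag using
// the new generic PermissionedAPI helper. Callers then need a token whose
// allowances include "admin" to reach Version or Shutdown.
func WrapProvider(impl api.LotusProvider) *api.LotusProviderStruct {
	return api.PermissionedAPI[api.LotusProvider, api.LotusProviderStruct](impl)
}
```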
@@ -255,7 +255,7 @@ type FullNodeMethods struct {

	EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`

-	EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) `perm:"read"`
+	EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) `perm:"read"`

	EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `perm:"read"`

@@ -679,7 +679,7 @@ type GatewayMethods struct {

	EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) ``

-	EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) ``
+	EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) ``

	EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) ``

@@ -831,6 +831,19 @@ type GatewayMethods struct {
type GatewayStub struct {
}

+type LotusProviderStruct struct {
+	Internal LotusProviderMethods
+}
+
+type LotusProviderMethods struct {
+	Shutdown func(p0 context.Context) error `perm:"admin"`
+
+	Version func(p0 context.Context) (Version, error) `perm:"admin"`
+}
+
+type LotusProviderStub struct {
+}
+
type NetStruct struct {
	Internal NetMethods
}
@@ -2134,14 +2147,14 @@ func (s *FullNodeStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error
	return *new(ethtypes.EthUint64), ErrNotSupported
}

-func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
+func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) {
	if s.Internal.EthEstimateGas == nil {
		return *new(ethtypes.EthUint64), ErrNotSupported
	}
	return s.Internal.EthEstimateGas(p0, p1)
}

-func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
+func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) {
	return *new(ethtypes.EthUint64), ErrNotSupported
}

@@ -4400,14 +4413,14 @@ func (s *GatewayStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error)
	return *new(ethtypes.EthUint64), ErrNotSupported
}

-func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
+func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) {
	if s.Internal.EthEstimateGas == nil {
		return *new(ethtypes.EthUint64), ErrNotSupported
	}
	return s.Internal.EthEstimateGas(p0, p1)
}

-func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
+func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) {
	return *new(ethtypes.EthUint64), ErrNotSupported
}

@@ -5214,6 +5227,28 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
	return "", ErrNotSupported
}

+func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
+	if s.Internal.Shutdown == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.Shutdown(p0)
+}
+
+func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
+	return ErrNotSupported
+}
+
+func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
+	if s.Internal.Version == nil {
+		return *new(Version), ErrNotSupported
+	}
+	return s.Internal.Version(p0)
+}
+
+func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) {
+	return *new(Version), ErrNotSupported
+}
+
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
	if s.Internal.ID == nil {
		return *new(peer.ID), ErrNotSupported
@@ -7442,6 +7477,7 @@ var _ CommonNet = new(CommonNetStruct)
var _ EthSubscriber = new(EthSubscriberStruct)
var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct)
+var _ LotusProvider = new(LotusProviderStruct)
var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct)
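The `var _ Iface = new(Struct)` lines at the bottom are compile-time assertions: they fail the build if the generated struct ever drifts out of sync with its interface. A minimal standalone illustration of the idiom:

```go
package main

import "fmt"

type Greeter interface{ Greet() string }

type GreeterStruct struct{}

func (GreeterStruct) Greet() string { return "hi" }

// Compile-time check: delete the Greet method above and this line stops
// compiling, which is exactly how proxy_gen.go guards LotusProviderStruct.
var _ Greeter = new(GreeterStruct)

func main() { fmt.Println(GreeterStruct{}.Greet()) }
```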
@@ -56,9 +56,17 @@ type PubsubScore struct {
	Score *pubsub.PeerScoreSnapshot
}

// MessageSendSpec contains optional fields which modify message sending behavior
type MessageSendSpec struct {
	// MaxFee specifies a cap on network fees related to this message
	MaxFee abi.TokenAmount

	// MsgUuid specifies a unique message identifier which can be used on node (or node cluster)
	// level to prevent double-sends of messages even when nonce generation is not handled by sender
	MsgUuid uuid.UUID

+	// MaximizeFeeCap makes message FeeCap be based entirely on MaxFee
+	MaximizeFeeCap bool
}
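Per the "mpool/wdpost: Maximize feecap config" changelog entry, this flag derives the message's FeeCap directly from MaxFee rather than from gas estimation. A sketch of the arithmetic this implies (an assumption for illustration, not lifted from the lotus implementation): with gas limit G and a total-fee budget MaxFee, the largest safe per-gas cap is MaxFee / G, so the worst-case charge G * feeCap never exceeds the budget.

```go
package main

import (
	"fmt"
	"math/big"
)

// maximizedFeeCap sketches what MaximizeFeeCap implies: spend the whole
// MaxFee budget on the fee cap, i.e. feeCap = maxFee / gasLimit.
func maximizedFeeCap(maxFee *big.Int, gasLimit int64) *big.Int {
	return new(big.Int).Div(maxFee, big.NewInt(gasLimit))
}

func main() {
	maxFee := big.NewInt(1_000_000_000) // attoFIL budget for this message
	gasLimit := int64(25_000_000)
	fmt.Println(maximizedFeeCap(maxFee, gasLimit)) // 40 attoFIL per gas unit
}
```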
type MpoolMessageWhole struct {
@@ -12,3 +12,5 @@ type RawFullNodeAPI FullNode
func PermissionedFullAPI(a FullNode) FullNode {
	return api.PermissionedFullAPI(a)
}
+
+type LotusProviderStruct = api.LotusProviderStruct
@@ -59,6 +59,8 @@ var (

	MinerAPIVersion0  = newVer(1, 5, 0)
	WorkerAPIVersion0 = newVer(1, 7, 0)
+
+	ProviderAPIVersion0 = newVer(1, 0, 0)
)

//nolint:varcheck,deadcode
@@ -183,3 +183,17 @@ func (b *idstore) Close() error {
func (b *idstore) Flush(ctx context.Context) error {
	return b.bs.Flush(ctx)
}
+
+func (b *idstore) CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error {
+	if bs, ok := b.bs.(BlockstoreGC); ok {
+		return bs.CollectGarbage(ctx, options...)
+	}
+	return xerrors.Errorf("not supported")
+}
+
+func (b *idstore) GCOnce(ctx context.Context, options ...BlockstoreGCOption) error {
+	if bs, ok := b.bs.(BlockstoreGCOnce); ok {
+		return bs.GCOnce(ctx, options...)
+	}
+	return xerrors.Errorf("not supported")
+}
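The wrapper only forwards garbage collection when the underlying blockstore opts in, discovered via a runtime type assertion. A self-contained illustration of this optional-capability pattern (the interface and type names below are simplified stand-ins, not the lotus ones):

```go
package main

import (
	"errors"
	"fmt"
)

type store interface{ Get(key string) string }

// gcStore is the optional capability, discovered at runtime.
type gcStore interface{ CollectGarbage() error }

type wrapper struct{ inner store }

// CollectGarbage forwards to the inner store only if it implements gcStore,
// mirroring how idstore delegates to BlockstoreGC.
func (w wrapper) CollectGarbage() error {
	if gc, ok := w.inner.(gcStore); ok {
		return gc.CollectGarbage()
	}
	return errors.New("not supported")
}

type plainStore struct{}

func (plainStore) Get(string) string { return "" }

func main() {
	w := wrapper{inner: plainStore{}}
	fmt.Println(w.CollectGarbage()) // not supported
}
```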
@@ -182,7 +182,6 @@ type SplitStore struct {

	compactionIndex int64
	pruneIndex      int64
	onlineGCCnt     int64

	ctx    context.Context
	cancel func()
@@ -68,6 +68,7 @@ var (
const (
	batchSize  = 16384
	cidKeySize = 128
+	purgeWorkSliceDuration = time.Second
)

func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@@ -1372,9 +1373,21 @@ func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet
		return err
	}

+	now := time.Now()
+
	err := coldr.ForEach(func(c cid.Cid) error {
		batch = append(batch, c)
		if len(batch) == batchSize {
+			// add some time slicing to the purge, as this is a very disk I/O heavy
+			// operation that requires write access to txnLk and may starve other
+			// operations that require access to the blockstore.
+			elapsed := time.Since(now)
+			if elapsed > purgeWorkSliceDuration {
+				// work 1 slice, sleep 4 slices, or 20% utilization
+				time.Sleep(4 * elapsed)
+				now = time.Now()
+			}

			return deleteBatch()
		}
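The throttle above targets roughly a 20% duty cycle: after each full batch it checks how long the current work slice has run, and once that exceeds `purgeWorkSliceDuration` it sleeps four times the elapsed work time before starting a fresh slice. A standalone sketch of the same pattern (generic illustration, not lotus code):

```go
package main

import (
	"fmt"
	"time"
)

const workSlice = 10 * time.Millisecond // stand-in for purgeWorkSliceDuration

// throttled runs fn repeatedly, sleeping 4x the elapsed work time whenever a
// slice exceeds workSlice: one part work to four parts sleep, ~20% utilization.
func throttled(iterations int, fn func()) {
	now := time.Now()
	for i := 0; i < iterations; i++ {
		fn()
		if elapsed := time.Since(now); elapsed > workSlice {
			time.Sleep(4 * elapsed)
			now = time.Now()
		}
	}
}

func main() {
	start := time.Now()
	// Each call overruns the slice, so every iteration is followed by a sleep.
	throttled(5, func() { time.Sleep(12 * time.Millisecond) })
	fmt.Println("total:", time.Since(start)) // roughly 5 x (12ms work + 48ms sleep)
}
```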
@@ -3,9 +3,9 @@ package splitstore

import (
	"context"
	"crypto/rand"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
@@ -1,9 +1,6 @@
/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj
/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc
/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4
/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R
/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc
/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH
/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
@@ -11,7 +8,5 @@
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
-/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
-/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt
/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST
@@ -5,5 +5,8 @@ import (
)

func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool {
+	if upgradeEpoch < 0 {
+		return false
+	}
	return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality
}
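This guard implements the "an epoch is near an upgrade iff the upgrade is enabled (#11401)" changelog entry: disabled upgrades are encoded with a negative epoch, and without the check any epoch below `Finality` would still fall inside the `±Finality` window around, say, epoch -1. A quick standalone check of the behavior (the constants are illustrative stand-ins):

```go
package main

import "fmt"

type ChainEpoch int64 // stand-in for abi.ChainEpoch

const finality ChainEpoch = 900 // illustrative; mainnet finality is 900 epochs

func isNearUpgrade(epoch, upgradeEpoch ChainEpoch) bool {
	if upgradeEpoch < 0 { // a negative epoch means the upgrade is disabled
		return false
	}
	return epoch > upgradeEpoch-finality && epoch < upgradeEpoch+finality
}

func main() {
	// Without the guard, 100 > -1-900 && 100 < -1+900 would be true.
	fmt.Println(isNearUpgrade(100, -1))  // false: disabled upgrade is never "near"
	fmt.Println(isNearUpgrade(100, 500)) // true: within 900 epochs of the upgrade
}
```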
Binary file not shown.
Binary file not shown.
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
-const BuildVersion = "1.25.1"
+const BuildVersion = "1.25.2"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@ -72,7 +72,7 @@ func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.Token
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v0.go
generated

@ -62,7 +62,7 @@ func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v10.go
generated

@ -62,7 +62,7 @@ func (s *state10) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v11.go
generated

@ -62,7 +62,7 @@ func (s *state11) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v12.go
generated

@ -62,7 +62,7 @@ func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v2.go
generated

@ -61,7 +61,7 @@ func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v3.go
generated

@ -62,7 +62,7 @@ func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v4.go
generated

@ -62,7 +62,7 @@ func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v5.go
generated

@ -62,7 +62,7 @@ func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v6.go
generated

@ -62,7 +62,7 @@ func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v7.go
generated

@ -62,7 +62,7 @@ func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v8.go
generated

@ -62,7 +62,7 @@ func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
2
chain/actors/builtin/miner/v9.go
generated

@ -62,7 +62,7 @@ func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
            available = abi.NewTokenAmount(0)
        }
    }()
    // this panics if the miner doesnt have enough funds to cover their locked pledge
    // this panics if the miner doesn't have enough funds to cover their locked pledge
    available, err = s.GetAvailableBalance(bal)
    return available, err
}
@ -29,7 +29,7 @@ import (
    "github.com/filecoin-project/lotus/chain/types"
)

// Load returns an abstract copy of payment channel state, irregardless of actor version
// Load returns an abstract copy of payment channel state, regardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) {
    if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
        if name != manifest.PaychKey {

@ -29,7 +29,7 @@ import (
    "github.com/filecoin-project/lotus/chain/types"
)

// Load returns an abstract copy of payment channel state, irregardless of actor version
// Load returns an abstract copy of payment channel state, regardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) {
    if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
        if name != manifest.PaychKey {
@ -13,7 +13,7 @@ import (
func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
    buf := new(bytes.Buffer)
    if err := i.MarshalCBOR(buf); err != nil {
        // TODO: shouldnt this be a fatal error?
        // TODO: shouldn't this be a fatal error?
        return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
    }
    return buf.Bytes(), nil
@ -867,6 +867,24 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
    }
}

var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof

func init() {
    PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof)
    for sealProof, info := range abi.SealProofInfos {
        PoStToSealMap[info.WinningPoStProof] = sealProof
        PoStToSealMap[info.WindowPoStProof] = sealProof
    }
}

func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
    sealProof, exists := PoStToSealMap[postProof]
    if !exists {
        return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof")
    }
    return sealProof, nil
}

func min(a, b int) int {
    if a < b {
        return a
@ -343,6 +343,23 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
    }
}

var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof

func init() {
    PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof)
    for sealProof, info := range abi.SealProofInfos {
        PoStToSealMap[info.WinningPoStProof] = sealProof
        PoStToSealMap[info.WindowPoStProof] = sealProof
    }
}

func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
    sealProof, exists := PoStToSealMap[postProof]
    if !exists {
        return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof")
    }
    return sealProof, nil
}

func min(a, b int) int {
    if a < b {
        return a
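A sketch of how a caller might use the reverse mapping; this rebuilds the same map locally so it runs standalone, and the specific proof constant is just an illustration (`abi.SealProofInfos` is the go-state-types table the hunk iterates):

package main

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"
)

// postToSeal plays the role of the package-level PoStToSealMap built in
// the init() above: every Winning/Window PoSt proof keys back to a seal proof.
var postToSeal = map[abi.RegisteredPoStProof]abi.RegisteredSealProof{}

func init() {
    for sealProof, info := range abi.SealProofInfos {
        postToSeal[info.WinningPoStProof] = sealProof
        postToSeal[info.WindowPoStProof] = sealProof
    }
}

func main() {
    sp, ok := postToSeal[abi.RegisteredPoStProof_StackedDrgWindow32GiBV1]
    fmt.Println(sp, ok) // a corresponding 32GiB seal proof, true
}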
@ -29,19 +29,6 @@ import (

var log = logging.Logger("drand")

type drandPeer struct {
    addr string
    tls  bool
}

func (dp *drandPeer) Address() string {
    return dp.addr
}

func (dp *drandPeer) IsTLS() bool {
    return dp.tls
}

// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
@ -17,7 +17,7 @@ import (
)

func TestPrintGroupInfo(t *testing.T) {
    server := build.DrandConfigs[build.DrandDevnet].Servers[0]
    server := build.DrandConfigs[build.DrandTestnet].Servers[0]
    c, err := hclient.New(server, nil, nil)
    assert.NoError(t, err)
    cg := c.(interface {

@ -31,7 +31,7 @@ func TestPrintGroupInfo(t *testing.T) {

func TestMaxBeaconRoundForEpoch(t *testing.T) {
    todayTs := uint64(1652222222)
    db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandDevnet])
    db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet])
    assert.NoError(t, err)
    mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100)
    mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100)
@ -362,7 +362,8 @@ func CreateBlockHeader(ctx context.Context, sm *stmgr.StateManager, pts *types.T
    var blsMsgCids, secpkMsgCids []cid.Cid
    var blsSigs []crypto.Signature
    nv := sm.GetNetworkVersion(ctx, bt.Epoch)
    for _, msg := range bt.Messages {
    for _, msgTmp := range bt.Messages {
        msg := msgTmp
        if msg.Signature.Type == crypto.SigTypeBLS {
            blsSigs = append(blsSigs, msg.Signature)
            blsMessages = append(blsMessages, &msg.Message)
@ -147,9 +147,6 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
                return xerrors.Errorf("callback failed on cron message: %w", err)
            }
        }
        if ret.ExitCode != 0 {
            return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode)
        }

        return nil
    }
@ -91,9 +91,6 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito
        }
    }

    if ret.ExitCode != 0 {
        return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
    }
    return nil
}
@ -180,7 +180,7 @@ func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSe
    // Update the head first so we don't accidental skip reverting a concurrent call to ChainAt.
    e.updateHead(to)

    // Call revert on all hights between the two tipsets, handling empty tipsets.
    // Call revert on all heights between the two tipsets, handling empty tipsets.
    for h := from.Height(); h > to.Height(); h-- {
        e.lk.Lock()
        triggers := e.tsHeights[h]
@ -673,7 +673,7 @@ func TestCalled(t *testing.T) {
    }, 3, 20, matchAddrMethod(t0123, 5))
    require.NoError(t, err)

    // create few blocks to make sure nothing get's randomly called
    // create few blocks to make sure nothing gets randomly called

    fcs.advance(0, 4, 0, nil) // H=5
    require.Equal(t, false, applied)

@ -991,7 +991,7 @@ func TestCalledNull(t *testing.T) {
    }, 3, 20, matchAddrMethod(t0123, 5))
    require.NoError(t, err)

    // create few blocks to make sure nothing get's randomly called
    // create few blocks to make sure nothing gets randomly called

    fcs.advance(0, 4, 0, nil) // H=5
    require.Equal(t, false, applied)

@ -1050,7 +1050,7 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
    }, 3, 20, matchAddrMethod(t0123, 5))
    require.NoError(t, err)

    // create few blocks to make sure nothing get's randomly called
    // create few blocks to make sure nothing gets randomly called

    fcs.advance(0, 4, 0, nil) // H=5
    require.Equal(t, false, applied)

@ -1155,7 +1155,7 @@ func TestStateChanged(t *testing.T) {
    })
    require.NoError(t, err)

    // create few blocks to make sure nothing get's randomly called
    // create few blocks to make sure nothing gets randomly called

    fcs.advance(0, 4, 0, nil) // H=5
    require.Equal(t, false, applied)
@ -388,7 +388,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a

    if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight {
        // Filter needs historic events
        if err := m.EventIndex.PrefillFilter(ctx, f); err != nil {
        if err := m.EventIndex.PrefillFilter(ctx, f, true); err != nil {
            return nil, err
        }
    }
@ -481,7 +481,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
}

// PrefillFilter fills a filter's collection of events from the historic index
func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error {
func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error {
    clauses := []string{}
    values := []any{}
    joins := []string{}

@ -500,6 +500,11 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error {
        }
    }

    if excludeReverted {
        clauses = append(clauses, "event.reverted=?")
        values = append(values, false)
    }

    if len(f.addresses) > 0 {
        subclauses := []string{}
        for _, addr := range f.addresses {
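The `clauses`/`values` pair accumulated here is the standard parameterized-SQL pattern: one clause plus one placeholder value per active filter criterion, joined into a WHERE clause at the end. Schematically (the table and column names are illustrative only, not the actual index schema):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Mirrors the accumulation in PrefillFilter.
    clauses := []string{"event.height>=?"}
    values := []any{14000}

    excludeReverted := true
    if excludeReverted {
        clauses = append(clauses, "event.reverted=?")
        values = append(values, false)
    }

    q := "SELECT * FROM event"
    if len(clauses) > 0 {
        q += " WHERE " + strings.Join(clauses, " AND ")
    }
    fmt.Println(q, values) // then handed to db.Query(q, values...)
}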
@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) {
    for _, tc := range testCases {
        tc := tc // appease lint
        t.Run(tc.name, func(t *testing.T) {
            if err := ei.PrefillFilter(context.Background(), tc.filter); err != nil {
            if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil {
                require.NoError(t, err, "prefill filter events")
            }
@ -281,3 +281,619 @@ func TestEventIndexPrefillFilter(t *testing.T) {
        })
    }
}

func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) {
    rng := pseudo.New(pseudo.NewSource(299792458))
    a1 := randomF4Addr(t, rng)
    a2 := randomF4Addr(t, rng)
    a3 := randomF4Addr(t, rng)

    a1ID := abi.ActorID(1)
    a2ID := abi.ActorID(2)

    addrMap := addressMap{}
    addrMap.add(a1ID, a1)
    addrMap.add(a2ID, a2)

    ev1 := fakeEvent(
        a1ID,
        []kv{
            {k: "type", v: []byte("approval")},
            {k: "signer", v: []byte("addr1")},
        },
        []kv{
            {k: "amount", v: []byte("2988181")},
        },
    )
    ev2 := fakeEvent(
        a2ID,
        []kv{
            {k: "type", v: []byte("approval")},
            {k: "signer", v: []byte("addr2")},
        },
        []kv{
            {k: "amount", v: []byte("2988182")},
        },
    )

    st := newStore()
    events := []*types.Event{ev1}
    revertedEvents := []*types.Event{ev2}
    em := executedMessage{
        msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)),
        rct: fakeReceipt(t, rng, st, events),
        evs: events,
    }
    revertedEm := executedMessage{
        msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)),
        rct: fakeReceipt(t, rng, st, revertedEvents),
        evs: revertedEvents,
    }

    events14000 := buildTipSetEvents(t, rng, 14000, em)
    revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm)
    cid14000, err := events14000.msgTs.Key().Cid()
    require.NoError(t, err, "tipset cid")
    reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid()
    require.NoError(t, err, "tipset cid")

    noCollectedEvents := []*CollectedEvent{}
    oneCollectedEvent := []*CollectedEvent{
        {
            Entries: ev1.Entries,
            EmitterAddr: a1,
            EventIdx: 0,
            Reverted: false,
            Height: 14000,
            TipSetKey: events14000.msgTs.Key(),
            MsgIdx: 0,
            MsgCid: em.msg.Cid(),
        },
    }
    twoCollectedEvent := []*CollectedEvent{
        {
            Entries: ev1.Entries,
            EmitterAddr: a1,
            EventIdx: 0,
            Reverted: false,
            Height: 14000,
            TipSetKey: events14000.msgTs.Key(),
            MsgIdx: 0,
            MsgCid: em.msg.Cid(),
        },
        {
            Entries: ev2.Entries,
            EmitterAddr: a2,
            EventIdx: 0,
            Reverted: true,
            Height: 14000,
            TipSetKey: revertedEvents14000.msgTs.Key(),
            MsgIdx: 0,
            MsgCid: revertedEm.msg.Cid(),
        },
    }
    oneCollectedRevertedEvent := []*CollectedEvent{
        {
            Entries: ev2.Entries,
            EmitterAddr: a2,
            EventIdx: 0,
            Reverted: true,
            Height: 14000,
            TipSetKey: revertedEvents14000.msgTs.Key(),
            MsgIdx: 0,
            MsgCid: revertedEm.msg.Cid(),
        },
    }

    workDir, err := os.MkdirTemp("", "lotusevents")
    require.NoError(t, err, "create temporary work directory")

    defer func() {
        _ = os.RemoveAll(workDir)
    }()
    t.Logf("using work dir %q", workDir)

    dbPath := filepath.Join(workDir, "actorevents.db")

    ei, err := NewEventIndex(context.Background(), dbPath, nil)
    require.NoError(t, err, "create event index")
    if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil {
        require.NoError(t, err, "collect reverted events")
    }
    if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil {
        require.NoError(t, err, "revert reverted events")
    }
    if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil {
        require.NoError(t, err, "collect events")
    }

    inclusiveTestCases := []struct {
        name   string
        filter *EventFilter
        te     *TipSetEvents
        want   []*CollectedEvent
    }{
        {
            name: "nomatch tipset min height",
            filter: &EventFilter{
                minHeight: 14001,
                maxHeight: -1,
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch tipset max height",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: 13999,
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "match tipset min height",
            filter: &EventFilter{
                minHeight: 14000,
                maxHeight: -1,
            },
            te: events14000,
            want: twoCollectedEvent,
        },
        {
            name: "match tipset cid",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                tipsetCid: cid14000,
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match tipset cid",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                tipsetCid: reveredCID14000,
            },
            te: revertedEvents14000,
            want: oneCollectedRevertedEvent,
        },
        {
            name: "nomatch address",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a3},
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "match address 2",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a2},
            },
            te: revertedEvents14000,
            want: oneCollectedRevertedEvent,
        },
        {
            name: "match address 1",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a1},
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match one entry",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: twoCollectedEvent,
        },
        {
            name: "match one entry with alternate values",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("cancel"),
                        []byte("propose"),
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: twoCollectedEvent,
        },
        {
            name: "nomatch one entry by missing value",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("cancel"),
                        []byte("propose"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry by missing key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "method": {
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "match one entry with multiple keys",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr1"),
                    },
                },
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match one entry with multiple keys",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr2"),
                    },
                },
            },
            te: revertedEvents14000,
            want: oneCollectedRevertedEvent,
        },
        {
            name: "nomatch one entry with one mismatching key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "approver": {
                        []byte("addr1"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with one mismatching value",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr3"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with one unindexed key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "amount": {
                        []byte("2988181"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with one unindexed key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "amount": {
                        []byte("2988182"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
    }

    exclusiveTestCases := []struct {
        name   string
        filter *EventFilter
        te     *TipSetEvents
        want   []*CollectedEvent
    }{
        {
            name: "nomatch tipset min height",
            filter: &EventFilter{
                minHeight: 14001,
                maxHeight: -1,
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch tipset max height",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: 13999,
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "match tipset min height",
            filter: &EventFilter{
                minHeight: 14000,
                maxHeight: -1,
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match tipset cid",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                tipsetCid: cid14000,
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match tipset cid but reverted",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                tipsetCid: reveredCID14000,
            },
            te: revertedEvents14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch address",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a3},
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch address 2 but reverted",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a2},
            },
            te: revertedEvents14000,
            want: noCollectedEvents,
        },
        {
            name: "match address",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                addresses: []address.Address{a1},
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match one entry",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "match one entry with alternate values",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("cancel"),
                        []byte("propose"),
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "nomatch one entry by missing value",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("cancel"),
                        []byte("propose"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry by missing key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "method": {
                        []byte("approval"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "match one entry with multiple keys",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr1"),
                    },
                },
            },
            te: events14000,
            want: oneCollectedEvent,
        },
        {
            name: "nomatch one entry with one mismatching key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "approver": {
                        []byte("addr1"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with matching reverted value",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr2"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with one mismatching value",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "type": {
                        []byte("approval"),
                    },
                    "signer": {
                        []byte("addr3"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
        {
            name: "nomatch one entry with one unindexed key",
            filter: &EventFilter{
                minHeight: -1,
                maxHeight: -1,
                keys: map[string][][]byte{
                    "amount": {
                        []byte("2988181"),
                    },
                },
            },
            te: events14000,
            want: noCollectedEvents,
        },
    }

    for _, tc := range inclusiveTestCases {
        tc := tc // appease lint
        t.Run(tc.name, func(t *testing.T) {
            if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil {
                require.NoError(t, err, "prefill filter events")
            }

            coll := tc.filter.TakeCollectedEvents(context.Background())
            require.ElementsMatch(t, coll, tc.want, tc.name)
        })
    }

    for _, tc := range exclusiveTestCases {
        tc := tc // appease lint
        t.Run(tc.name, func(t *testing.T) {
            if err := ei.PrefillFilter(context.Background(), tc.filter, true); err != nil {
                require.NoError(t, err, "prefill filter events")
            }

            coll := tc.filter.TakeCollectedEvents(context.Background())
            require.ElementsMatch(t, coll, tc.want, tc.name)
        })
    }
}
@ -1,25 +0,0 @@
package state

import (
    "context"

    "github.com/ipfs/go-cid"
    cbor "github.com/ipfs/go-ipld-cbor"
)

type contextStore struct {
    ctx context.Context
    cst *cbor.BasicIpldStore
}

func (cs *contextStore) Context() context.Context {
    return cs.ctx
}

func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
    return cs.cst.Get(ctx, c, out)
}

func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
    return cs.cst.Put(ctx, v)
}
@ -247,7 +247,7 @@ func (c *client) processResponse(req *Request, res *Response, tipsets []*types.T
    // If we didn't request the headers they should have been provided
    // by the caller.
    if len(tipsets) < len(res.Chain) {
        return nil, xerrors.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets))
        return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets))
    }
    chain := make([]*BSTipSet, 0, resLength)
    for i, resChain := range res.Chain {
@ -284,16 +284,18 @@ func (c *client) validateCompressedIndices(chain []*BSTipSet) error {
                len(msgs.SecpkIncludes), blocksNum)
        }

        blsLen := uint64(len(msgs.Bls))
        secpLen := uint64(len(msgs.Secpk))
        for blockIdx := 0; blockIdx < blocksNum; blockIdx++ {
            for _, mi := range msgs.BlsIncludes[blockIdx] {
                if int(mi) >= len(msgs.Bls) {
                if mi >= blsLen {
                    return xerrors.Errorf("index in BlsIncludes (%d) exceeds number of messages (%d)",
                        mi, len(msgs.Bls))
                }
            }

            for _, mi := range msgs.SecpkIncludes[blockIdx] {
                if int(mi) >= len(msgs.Secpk) {
                if mi >= secpLen {
                    return xerrors.Errorf("index in SecpkIncludes (%d) exceeds number of messages (%d)",
                        mi, len(msgs.Secpk))
                }
            }
@ -315,18 +317,36 @@ func (c *client) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int)
        )
    }

    var ret []*types.TipSet
    start := tsk.Cids()
    for len(ret) < count {
        req := &Request{
            Head: tsk.Cids(),
            Length: uint64(count),
            Head: start,
            Length: uint64(count - len(ret)),
            Options: Headers,
        }

        validRes, err := c.doRequest(ctx, req, nil, nil)
        if err != nil {
            return nil, err
            return nil, xerrors.Errorf("failed to doRequest: %w", err)
        }

        return validRes.tipsets, nil
        if len(validRes.tipsets) == 0 {
            return nil, xerrors.Errorf("doRequest fetched zero tipsets: %w", err)
        }

        ret = append(ret, validRes.tipsets...)

        last := validRes.tipsets[len(validRes.tipsets)-1]
        if last.Height() <= 1 {
            // we've walked all the way up to genesis, return
            break
        }

        start = last.Parents().Cids()
    }

    return ret, nil
}
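The rewritten GetBlocks turns a single fixed-size request into backwards paging: ask for the remainder, append whatever arrives, then resume from the parents of the last tipset received, stopping only when `count` is reached or the walk hits genesis. The same pattern in miniature, with a plain slice standing in for the chain and the exchange API:

package main

import "fmt"

// fetch returns up to n entries walking backwards from index key, plus the
// next key to resume from; next < 0 signals that genesis was included.
func fetch(chain []string, key, n int) (items []string, next int) {
    for i := 0; i < n && key-i >= 0; i++ {
        items = append(items, chain[key-i])
    }
    return items, key - len(items)
}

func main() {
    chain := []string{"genesis", "b1", "b2", "b3", "b4"}
    var ret []string
    key, count := len(chain)-1, 10 // deliberately more than the chain holds
    for len(ret) < count {
        items, next := fetch(chain, key, 2) // server caps each page at 2
        ret = append(ret, items...)
        if next < 0 {
            break // hit genesis: the only case with fewer than count results
        }
        key = next
    }
    fmt.Println(ret) // [b4 b3 b2 b1 genesis]
}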
// GetFullTipSet implements Client.GetFullTipSet(). Refer to the godocs there.
@ -341,12 +361,16 @@ func (c *client) GetFullTipSet(ctx context.Context, peer peer.ID, tsk types.TipS

    validRes, err := c.doRequest(ctx, req, &peer, nil)
    if err != nil {
        return nil, err
        return nil, xerrors.Errorf("failed to doRequest: %w", err)
    }

    return validRes.toFullTipSets()[0], nil
    // If `doRequest` didn't fail we are guaranteed to have at least
    // *one* tipset here, so it's safe to index directly.
    fullTipsets := validRes.toFullTipSets()

    if len(fullTipsets) == 0 {
        return nil, xerrors.New("unexpectedly got no tipsets in exchange")
    }

    return fullTipsets[0], nil
}

// GetChainMessages implements Client.GetChainMessages(). Refer to the godocs there.
@ -386,7 +410,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque
    defer span.End()
    if span.IsRecordingEvents() {
        span.AddAttributes(
            trace.StringAttribute("peer", peer.Pretty()),
            trace.StringAttribute("peer", peer.String()),
        )
    }
    defer func() {
@ -28,8 +28,8 @@ type Server interface {
// used by the Syncer.
type Client interface {
    // GetBlocks fetches block headers from the network, from the provided
    // tipset *backwards*, returning as many tipsets as the count parameter,
    // or less.
    // tipset *backwards*, returning as many tipsets as the count parameter.
    // The ONLY case in which we return fewer than `count` tipsets is if we hit genesis.
    GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error)

    // GetChainMessages fetches messages from the network, starting from the first provided tipset
@ -251,7 +251,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
        }

        params := &markettypes.PublishStorageDealsParams{}
        for _, preseal := range m.Sectors {
        for _, presealTmp := range m.Sectors {
            preseal := presealTmp
            preseal.Deal.VerifiedDeal = true
            preseal.Deal.EndEpoch = minerInfos[i].presealExp
            p := markettypes.ClientDealProposal{
@ -131,7 +131,7 @@ func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex

    db, err := sql.Open("sqlite3", dbPath)
    if err != nil {
        // TODO [nice to have]: automaticaly delete corrupt databases
        // TODO [nice to have]: automatically delete corrupt databases
        // but for now we can just error and let the operator delete.
        return nil, xerrors.Errorf("error opening msgindex database: %w", err)
    }
@ -39,23 +39,6 @@ func (ps *Store) save(ctx context.Context, state *FundedAddressState) error {
    return ps.ds.Put(ctx, k, b)
}

// get the state for the given address
func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) {
    k := dskeyForAddr(addr)

    data, err := ps.ds.Get(ctx, k)
    if err != nil {
        return nil, err
    }

    var state FundedAddressState
    err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
    if err != nil {
        return nil, err
    }
    return &state, nil
}

// forEach calls iter with each address in the datastore
func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error {
    res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr})
@ -5,7 +5,6 @@ import (
    "math"
    "math/rand"
    "testing"
    "time"
)

func TestBlockProbability(t *testing.T) {

@ -23,7 +22,6 @@ func TestBlockProbability(t *testing.T) {

func TestWinnerProba(t *testing.T) {
    //stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002
    rand.Seed(time.Now().UnixNano())
    const N = 1000000
    winnerProba := noWinnersProb()
    sum := 0
@ -21,6 +21,7 @@ import (
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/minio/blake2b-simd"
    "github.com/raulk/clock"
    "go.opencensus.io/stats"
    "golang.org/x/xerrors"

    ffi "github.com/filecoin-project/filecoin-ffi"

@ -210,8 +211,10 @@ func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio types.Percent) abi.To

func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) {
    var maxFee abi.TokenAmount
    var maximizeFeeCap bool
    if sendSpec != nil {
        maxFee = sendSpec.MaxFee
        maximizeFeeCap = sendSpec.MaximizeFeeCap
    }
    if maxFee.Int == nil || maxFee.Equals(big.Zero()) {
        mf, err := mff()

@ -222,15 +225,12 @@ func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.M
        maxFee = mf
    }

    gl := types.NewInt(uint64(msg.GasLimit))
    totalFee := types.BigMul(msg.GasFeeCap, gl)

    if totalFee.LessThanEqual(maxFee) {
        msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
        return
    gaslimit := types.NewInt(uint64(msg.GasLimit))
    totalFee := types.BigMul(msg.GasFeeCap, gaslimit)
    if maximizeFeeCap || totalFee.GreaterThan(maxFee) {
        msg.GasFeeCap = big.Div(maxFee, gaslimit)
    }

    msg.GasFeeCap = big.Div(maxFee, gl)
    msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
}
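A worked example of the new capping rule: the fee cap is clamped to maxFee/gasLimit whenever the uncapped total (GasFeeCap x GasLimit) would exceed the max fee, or unconditionally when MaximizeFeeCap is set. With illustrative numbers, using the standard library's big.Int in place of the chain's BigInt wrapper:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    gasLimit := big.NewInt(10_000_000)
    maxFee := big.NewInt(1_000_000_000_000_000_000) // 1 FIL in attoFIL
    feeCap := big.NewInt(200_000_000_000)           // 200 gigaattoFIL per gas unit

    total := new(big.Int).Mul(feeCap, gasLimit) // 2 FIL: over budget
    if total.Cmp(maxFee) > 0 {
        feeCap.Div(maxFee, gasLimit) // clamp to 100 gigaattoFIL per gas unit
    }
    fmt.Println(feeCap) // 100000000000
}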
@ -1022,6 +1022,9 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
        }
    })

    // Record the current size of the Mpool
    stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))

    return nil
}

@ -1214,6 +1217,9 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u
            return
        }
    }

    // Record the current size of the Mpool
    stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))
}

func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
@ -438,7 +438,8 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
        return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
    }

    for addr, sto := range st.snaps.layers[0].actors {
    for addr, stoTmp := range st.snaps.layers[0].actors {
        sto := stoTmp
        if sto.Delete {
            if err := st.root.Delete(abi.AddrKey(addr)); err != nil {
                return cid.Undef, err

@ -570,7 +571,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
        }

        // no need to record anything here, there are no duplicates in the actors HAMT
        // iself.
        // itself.
        if _, ok := seen[addr]; ok {
            return nil
        }

@ -588,7 +589,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
        }

        // no need to record anything here, there are no duplicates in the actors HAMT
        // iself.
        // itself.
        if _, ok := seen[addr]; ok {
            return nil
        }
@ -235,11 +235,6 @@ func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) b
    return false
}

func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
    _, ok := sm.expensiveUpgrades[height]
    return ok
}

func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) {
    height := ts.Height()
    parent := ts.ParentState()
@ -42,7 +42,7 @@ func TestIndexSeeks(t *testing.T) {
    cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil)
    defer cs.Close() //nolint:errcheck

    _, err = cs.Import(ctx, bytes.NewReader(gencar))
    _, _, err = cs.Import(ctx, bytes.NewReader(gencar))
    if err != nil {
        t.Fatal(err)
    }
@ -212,13 +212,8 @@ func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) (

    var out []types.ChainMsg
    for _, bm := range bmsgs {
        for _, blsm := range bm.BlsMessages {
            out = append(out, blsm)
        }

        for _, secm := range bm.SecpkMessages {
            out = append(out, secm)
        }
        out = append(out, bm.BlsMessages...)
        out = append(out, bm.SecpkMessages...)
    }

    return out, nil
@ -60,7 +60,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
    })
}

func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) {
func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (head *types.TipSet, genesis *types.BlockHeader, err error) {
    // TODO: writing only to the state blockstore is incorrect.
    // At this time, both the state and chain blockstores are backed by the
    // universal store. When we physically segregate the stores, we will need

@ -69,7 +69,7 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e

    br, err := carv2.NewBlockReader(r)
    if err != nil {
        return nil, xerrors.Errorf("loadcar failed: %w", err)
        return nil, nil, xerrors.Errorf("loadcar failed: %w", err)
    }

    s := cs.StateBlockstore()

@ -80,27 +80,51 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
        putThrottle <- nil
    }

    if len(br.Roots) == 0 {
        return nil, nil, xerrors.Errorf("no roots in snapshot car file")
    }
    nextTailCid := br.Roots[0]

    var tailBlock types.BlockHeader
    tailBlock.Height = abi.ChainEpoch(-1)

    var buf []blocks.Block
    for {
        blk, err := br.Next()
        if err != nil {

            // we're at the end
            if err == io.EOF {
                if len(buf) > 0 {
                    if err := s.PutMany(ctx, buf); err != nil {
                        return nil, err
                        return nil, nil, err
                    }
                }

                break
            }
            return nil, err
            return nil, nil, err
        }

        // check for header block, looking for genesis
        if blk.Cid() == nextTailCid && tailBlock.Height != 0 {
            if err := tailBlock.UnmarshalCBOR(bytes.NewReader(blk.RawData())); err != nil {
                return nil, nil, xerrors.Errorf("failed to unmarshal genesis block: %w", err)
            }
            if len(tailBlock.Parents) > 0 {
                nextTailCid = tailBlock.Parents[0]
            } else {
                // note: even the 0th block has a parent linking to the cbor genesis block
                return nil, nil, xerrors.Errorf("current block (epoch %d cid %s) has no parents", tailBlock.Height, tailBlock.Cid())
            }
        }

        // append to batch
        buf = append(buf, blk)

        if len(buf) > 1000 {
            if lastErr := <-putThrottle; lastErr != nil { // consume one error to have the right to add one
                return nil, lastErr
                return nil, nil, lastErr
            }

            go func(buf []blocks.Block) {

@ -113,13 +137,17 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
    // check errors
    for i := 0; i < parallelPuts; i++ {
        if lastErr := <-putThrottle; lastErr != nil {
            return nil, lastErr
            return nil, nil, lastErr
        }
    }

    if tailBlock.Height != 0 {
        return nil, nil, xerrors.Errorf("expected genesis block to have height 0 (genesis), got %d: %s", tailBlock.Height, tailBlock.Cid())
    }

    root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(br.Roots...))
    if err != nil {
        return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err)
        return nil, nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err)
    }

    ts := root

@ -135,10 +163,10 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
    }

    if err := cs.PersistTipsets(ctx, tssToPersist); err != nil {
        return nil, xerrors.Errorf("failed to persist tipsets: %w", err)
        return nil, nil, xerrors.Errorf("failed to persist tipsets: %w", err)
    }

    return root, nil
    return root, &tailBlock, nil
}
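The genesis detection above, reduced to its essence: starting from the snapshot root, keep following the first parent link, and the header that ends the walk must have height 0 or the snapshot is rejected. Schematically (hypothetical in-memory header type; the real code does this while streaming CAR blocks, matching each block's CID against the next expected parent):

package main

import "fmt"

type header struct {
    height int64
    parent *header // first parent; nil terminates the sketch
}

// findGenesis walks first-parent links until it reaches a height-0 header.
func findGenesis(head *header) (*header, error) {
    for h := head; h != nil; h = h.parent {
        if h.height == 0 {
            return h, nil
        }
    }
    return nil, fmt.Errorf("walked the whole chain without finding a height-0 block")
}

func main() {
    gen := &header{height: 0}
    b1 := &header{height: 1, parent: gen}
    b2 := &header{height: 2, parent: b1}
    g, err := findGenesis(b2)
    fmt.Println(g.height, err) // 0 <nil>
}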
type walkSchedTaskType int

@ -167,7 +195,7 @@ func (t walkSchedTaskType) String() string {
    case dagTask:
        return "dag"
    }
    panic(fmt.Sprintf("unknow task %d", t))
    panic(fmt.Sprintf("unknown task %d", t))
}

type walkTask struct {

@ -656,9 +684,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
        }

        if b.Height > 0 {
            for _, p := range b.Parents {
                blocksToWalk = append(blocksToWalk, p)
            }
            blocksToWalk = append(blocksToWalk, b.Parents...)
        } else {
            // include the genesis block
            cids = append(cids, b.Parents...)
@ -118,7 +118,7 @@ func TestChainExportImport(t *testing.T) {
    cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
    defer cs.Close() //nolint:errcheck

    root, err := cs.Import(context.TODO(), buf)
    root, _, err := cs.Import(context.TODO(), buf)
    if err != nil {
        t.Fatal(err)
    }

@ -153,7 +153,7 @@ func TestChainImportTipsetKeyCid(t *testing.T) {
    cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
    defer cs.Close() //nolint:errcheck

    root, err := cs.Import(ctx, buf)
    root, _, err := cs.Import(ctx, buf)
    require.NoError(t, err)

    require.Truef(t, root.Equals(last), "imported chain differed from exported chain")

@ -202,7 +202,7 @@ func TestChainExportImportFull(t *testing.T) {
    cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil)
    defer cs.Close() //nolint:errcheck

    root, err := cs.Import(context.TODO(), buf)
    root, _, err := cs.Import(context.TODO(), buf)
    if err != nil {
        t.Fatal(err)
    }
@ -516,7 +516,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
        return pubsub.ValidationReject
    }
    if len(idxrMsg.ExtraData) == 0 {
        log.Debugw("ignoring messsage missing miner id", "peer", originPeer)
        log.Debugw("ignoring message missing miner id", "peer", originPeer)
        return pubsub.ValidationIgnore
    }

@ -552,7 +552,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
    // Check that the miner ID maps to the peer that sent the message.
    err = v.authenticateMessage(ctx, minerAddr, originPeer)
    if err != nil {
        log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr)
        log.Warnw("cannot authenticate message", "err", err, "peer", originPeer, "minerID", minerAddr)
        stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
        return pubsub.ValidationReject
    }
61
chain/sub/ratelimit/queue_test.go
Normal file

@ -0,0 +1,61 @@
package ratelimit

import (
    "testing"
)

func TestQueue(t *testing.T) {
    const size = 3
    q := &queue{buf: make([]int64, size)}

    if q.len() != 0 {
        t.Fatalf("q.len() = %d, expect 0", q.len())
    }

    if q.cap() != size {
        t.Fatalf("q.cap() = %d, expect %d", q.cap(), size)
    }

    for i := int64(0); i < int64(size); i++ {
        err := q.push(i)
        if err != nil {
            t.Fatalf("cannot push element %d", i)
        }
    }

    if q.len() != size {
        t.Fatalf("q.len() = %d, expect %d", q.len(), size)
    }

    err := q.push(int64(size))
    if err != ErrRateLimitExceeded {
        t.Fatalf("pushing element beyond capacity should have failed with err: %s, got %s", ErrRateLimitExceeded, err)
    }

    if q.front() != 0 {
        t.Fatalf("q.front() = %d, expect 0", q.front())
    }

    if q.back() != int64(size-1) {
        t.Fatalf("q.back() = %d, expect %d", q.back(), size-1)
    }

    popVal := q.pop()
    if popVal != 0 {
        t.Fatalf("q.pop() = %d, expect 0", popVal)
    }

    if q.len() != size-1 {
        t.Fatalf("q.len() = %d, expect %d", q.len(), size-1)
    }

    // Testing truncation.
    threshold := int64(1)
    q.truncate(threshold)
    if q.len() != 1 {
        t.Fatalf("q.len() after truncate = %d, expect 1", q.len())
    }
    if q.front() != 2 {
        t.Fatalf("q.front() after truncate = %d, expect 2", q.front())
    }
}
@ -844,7 +844,7 @@ loop:
            return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
        }
        if base.IsChildOf(knownParent) {
            // common case: receiving a block thats potentially part of the same tipset as our best block
            // common case: receiving a block that's potentially part of the same tipset as our best block
            return blockSet, nil
        }

@ -886,6 +886,35 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
        }
    }

    incomingParentsTsk := incoming.Parents()
    commonParent := false
    for _, incomingParent := range incomingParentsTsk.Cids() {
        if known.Contains(incomingParent) {
            commonParent = true
        }
    }

    if commonParent {
        // known contains at least one of incoming's Parents => the common ancestor is known's Parents (incoming's Grandparents)
        // in this case, we need to return {incoming.Parents()}
        incomingParents, err := syncer.store.LoadTipSet(ctx, incomingParentsTsk)
        if err != nil {
            // fallback onto the network
            tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), 1)
            if err != nil {
                return nil, xerrors.Errorf("failed to fetch incomingParents from the network: %w", err)
            }

            if len(tips) == 0 {
                return nil, xerrors.Errorf("network didn't return any tipsets")
            }

            incomingParents = tips[0]
        }

        return []*types.TipSet{incomingParents}, nil
    }

    // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes.
    // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
    tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))
@ -11,7 +11,6 @@ import (
    "github.com/ipfs/go-cid"
    ds "github.com/ipfs/go-datastore"
    logging "github.com/ipfs/go-log/v2"
    "github.com/libp2p/go-libp2p/core/peer"
    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
    "github.com/stretchr/testify/require"

@ -344,13 +343,6 @@ func (tu *syncTestUtil) addClientNode() int {
    return len(tu.nds) - 1
}

func (tu *syncTestUtil) pid(n int) peer.ID {
    nal, err := tu.nds[n].NetAddrsListen(tu.ctx)
    require.NoError(tu.t, err)

    return nal.ID
}

func (tu *syncTestUtil) connect(from, to int) {
    toPI, err := tu.nds[to].NetAddrsListen(tu.ctx)
    require.NoError(tu.t, err)
@ -7,9 +7,6 @@ import (
)

func TestDecodeBlockMsg(t *testing.T) {
    type args struct {
        b []byte
    }
    tests := []struct {
        name string
        data []byte
@ -62,9 +62,14 @@ type EthTxArgs struct {
// - BlockHash
// - BlockNumber
// - TransactionIndex
// - From
// - Hash
func EthTxFromSignedEthMessage(smsg *types.SignedMessage) (EthTx, error) {
    // The from address is always an f410f address, never an ID or other address.
    if !IsEthAddress(smsg.Message.From) {
        return EthTx{}, xerrors.Errorf("sender must be an eth account, was %s", smsg.Message.From)
    }

    // Probably redundant, but we might as well check.
    if smsg.Signature.Type != typescrypto.SigTypeDelegated {
        return EthTx{}, xerrors.Errorf("signature is not delegated type, is type: %d", smsg.Signature.Type)
    }

@ -79,10 +84,18 @@ func EthTxFromSignedEthMessage(smsg *types.SignedMessage) (EthTx, error) {
        return EthTx{}, xerrors.Errorf("failed to recover signature: %w", err)
    }

    from, err := EthAddressFromFilecoinAddress(smsg.Message.From)
    if err != nil {
        // This should be impossible as we've already asserted that we have an EthAddress
        // sender...
        return EthTx{}, xerrors.Errorf("sender was not an eth account")
    }

    return EthTx{
        Nonce: EthUint64(txArgs.Nonce),
        ChainID: EthUint64(txArgs.ChainID),
        To: txArgs.To,
        From: from,
        Value: EthBigInt(txArgs.Value),
        Type: Eip1559TxType,
        Gas: EthUint64(txArgs.GasLimit),
@ -799,6 +799,45 @@ func GetContractEthAddressFromCode(sender EthAddress, salt [32]byte, initcode []
    return ethAddr, nil
}

// EthEstimateGasParams handles raw jsonrpc params for eth_estimateGas
type EthEstimateGasParams struct {
    Tx EthCall
    BlkParam *EthBlockNumberOrHash
}

func (e *EthEstimateGasParams) UnmarshalJSON(b []byte) error {
    var params []json.RawMessage
    err := json.Unmarshal(b, &params)
    if err != nil {
        return err
    }

    switch len(params) {
    case 2:
        err = json.Unmarshal(params[1], &e.BlkParam)
        if err != nil {
            return err
        }
        fallthrough
    case 1:
        err = json.Unmarshal(params[0], &e.Tx)
        if err != nil {
            return err
        }
    default:
        return xerrors.Errorf("expected 1 or 2 params, got %d", len(params))
    }

    return nil
}

func (e EthEstimateGasParams) MarshalJSON() ([]byte, error) {
    if e.BlkParam != nil {
        return json.Marshal([]interface{}{e.Tx, e.BlkParam})
    }
    return json.Marshal([]interface{}{e.Tx})
}
||||
// EthFeeHistoryParams handles raw jsonrpc params for eth_feeHistory
|
||||
type EthFeeHistoryParams struct {
|
||||
BlkCount EthUint64
|
||||
|
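An editorial aside, not part of the diff: eth_estimateGas callers send positional JSON-RPC params, either [tx] or [tx, blockParam], which is why UnmarshalJSON above switches on the array length and falls through from the two-param case to the one-param case. A self-contained sketch of the same decoding pattern, using simplified stand-in types rather than the real EthCall and EthBlockNumberOrHash:

package main

import (
    "encoding/json"
    "fmt"
)

// Stand-ins for ethtypes.EthCall and EthBlockNumberOrHash; the field sets
// here are invented purely to keep the sketch compilable.
type call struct {
    To string `json:"to"`
}

type blkSelector struct {
    BlockNumber string `json:"blockNumber"`
}

type estimateGasParams struct {
    Tx       call
    BlkParam *blkSelector
}

func (e *estimateGasParams) UnmarshalJSON(b []byte) error {
    var params []json.RawMessage
    if err := json.Unmarshal(b, &params); err != nil {
        return err
    }
    switch len(params) {
    case 2:
        // decode the optional block selector first, then fall through for the tx
        if err := json.Unmarshal(params[1], &e.BlkParam); err != nil {
            return err
        }
        fallthrough
    case 1:
        if err := json.Unmarshal(params[0], &e.Tx); err != nil {
            return err
        }
    default:
        return fmt.Errorf("expected 1 or 2 params, got %d", len(params))
    }
    return nil
}

func main() {
    var one estimateGasParams
    _ = json.Unmarshal([]byte(`[{"to":"0xabc"}]`), &one)
    fmt.Println(one.Tx.To, one.BlkParam) // 0xabc <nil>

    var two estimateGasParams
    _ = json.Unmarshal([]byte(`[{"to":"0xabc"},{"blockNumber":"latest"}]`), &two)
    fmt.Println(two.Tx.To, two.BlkParam.BlockNumber) // 0xabc latest
}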
@ -12,6 +12,9 @@ import (
type FIL BigInt

func (f FIL) String() string {
if f.Int == nil {
return "0 FIL"
}
return f.Unitless() + " FIL"
}
@ -140,7 +140,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -186,7 +186,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -278,7 +278,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -324,7 +324,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -27,24 +27,3 @@ type StateTree interface {

Version() StateTreeVersion
}

type storageWrapper struct {
s Storage
}

func (sw *storageWrapper) Put(i cbg.CBORMarshaler) (cid.Cid, error) {
c, err := sw.s.Put(i)
if err != nil {
return cid.Undef, err
}

return c, nil
}

func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error {
if err := sw.s.Get(c, out); err != nil {
return err
}

return nil
}
@ -2,6 +2,7 @@ package main

import (
"context"
crand "crypto/rand"
"encoding/json"
"fmt"
"math/rand"
@ -145,7 +146,10 @@ func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector {
}

params := make([]byte, 32)
rand.Read(params)
_, err = crand.Read(params)
if err != nil {
panic(err)
}

msg := &types.Message{
To: to,
@ -1,6 +1,7 @@
package cli

import (
"errors"
"fmt"
"io"
"os"
@ -8,7 +9,6 @@ import (
"syscall"

ufcli "github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)

type PrintHelpErr struct {
@ -52,7 +52,7 @@ func RunApp(app *ufcli.App) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n\n", err) // nolint:errcheck
}
var phe *PrintHelpErr
if xerrors.As(err, &phe) {
if errors.As(err, &phe) {
_ = ufcli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
}
os.Exit(1)
@ -282,7 +282,7 @@ var NetDisconnect = &cli.Command{
fmt.Println("failure")
return err
}
fmt.Printf("disconnect %s: ", pid.Pretty())
fmt.Printf("disconnect %s: ", pid)
err = api.NetDisconnect(ctx, pid)
if err != nil {
fmt.Println("failure")
@ -312,7 +312,7 @@ var NetConnect = &cli.Command{
}

for _, pi := range pis {
fmt.Printf("connect %s: ", pi.ID.Pretty())
fmt.Printf("connect %s: ", pi.ID)
err := api.NetConnect(ctx, pi)
if err != nil {
fmt.Println("failure")
@ -847,7 +847,8 @@ var NetStatCmd = &cli.Command{
})

for _, stat := range stats {
printScope(&stat.stat, name+stat.name)
tmp := stat.stat
printScope(&tmp, name+stat.name)
}

}
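An editorial note on the tmp copy introduced in the NetStatCmd hunk above: passing &stat.stat took the address of a field of the loop variable, which Go reuses across iterations under pre-1.22 semantics, the implicit-aliasing pattern linters such as gosec flag. A self-contained sketch of the hazard and the copy fix (the scope type and values are invented):

package main

import "fmt"

type scope struct{ n int }

func main() {
    stats := []scope{{1}, {2}, {3}}

    var ptrs []*scope
    for _, s := range stats {
        tmp := s // fresh copy per iteration, same idea as tmp := stat.stat above
        ptrs = append(ptrs, &tmp)
    }

    // With the copy each pointer is distinct: prints 1 2 3.
    // Taking &s directly would print 3 3 3 on Go versions before 1.22.
    for _, p := range ptrs {
        fmt.Println(p.n)
    }
}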
cli/state.go (25 changes)
@ -1920,8 +1920,29 @@ var StateSysActorCIDsCmd = &cli.Command{
if err != nil {
return err
}
for name, cid := range actorsCids {
_, _ = fmt.Fprintf(tw, "%v\t%v\n", name, cid)

var actorsCidTuples []struct {
actorName string
actorCid cid.Cid
}

for name, actorCid := range actorsCids {
keyVal := struct {
actorName string
actorCid cid.Cid
}{
actorName: name,
actorCid: actorCid,
}
actorsCidTuples = append(actorsCidTuples, keyVal)
}

sort.Slice(actorsCidTuples, func(i, j int) bool {
return actorsCidTuples[i].actorName < actorsCidTuples[j].actorName
})

for _, keyVal := range actorsCidTuples {
_, _ = fmt.Fprintf(tw, "%v\t%v\n", keyVal.actorName, keyVal.actorCid)
}
return tw.Flush()
},
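Why the tuple-and-sort above is needed (editorial note): Go deliberately randomizes map iteration order, so ranging over actorsCids directly would print rows in a different order on every invocation. A minimal sketch of the same determinism fix (the map contents are invented):

package main

import (
    "fmt"
    "sort"
)

func main() {
    actorsCids := map[string]string{"cron": "bafk...1", "account": "bafk...2", "miner": "bafk...3"}

    // collect the keys, sort them, then print in a stable order
    names := make([]string, 0, len(actorsCids))
    for name := range actorsCids {
        names = append(names, name)
    }
    sort.Strings(names)

    for _, name := range names {
        fmt.Printf("%v\t%v\n", name, actorsCids[name]) // account, cron, miner - every run
    }
}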
@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
cliutil "github.com/filecoin-project/lotus/cli/util"
)

var SyncCmd = &cli.Command{
@ -262,6 +263,9 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
}
firstApp = state.VMApplied

// eta computes the ETA for the sync to complete (with a lookback of 10 processed items)
eta := cliutil.NewETA(10)

for {
state, err := napi.SyncState(ctx)
if err != nil {
@ -312,8 +316,10 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
fmt.Print("\r\x1b[2K\x1b[A")
}

todo := theight - ss.Height

fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff)
fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height)
fmt.Printf("State: %s; Current Epoch: %d; Todo: %d, ETA: %s\n", ss.Stage, ss.Height, todo, eta.Update(int64(todo)))
lastLines = 2

if i%samples == 0 {
@ -119,7 +119,7 @@ func GetAPIInfoMulti(ctx *cli.Context, t repo.RepoType) ([]APIInfo, error) {
}
}

return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v. Try setting environment variable: %s", t.Type(), primaryEnv)
}

func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
@ -164,6 +164,28 @@ func GetRawAPIMulti(ctx *cli.Context, t repo.RepoType, version string) ([]HttpHe
return httpHeads, nil
}

func GetRawAPIMultiV2(ctx *cli.Context, ainfoCfg []string, version string) ([]HttpHead, error) {
var httpHeads []HttpHead

if len(ainfoCfg) == 0 {
return httpHeads, xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './lotus-provider config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './lotus-provider config set /tmp/base.toml'")
}
for _, i := range ainfoCfg {
ainfo := ParseApiInfo(i)
addr, err := ainfo.DialArgs(version)
if err != nil {
return httpHeads, xerrors.Errorf("could not get DialArgs: %w", err)
}
httpHeads = append(httpHeads, HttpHead{addr: addr, header: ainfo.AuthHeader()})
}

if IsVeryVerbose {
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, httpHeads[0].addr)
}

return httpHeads, nil
}

func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
heads, err := GetRawAPIMulti(ctx, t, version)
if err != nil {
@ -393,6 +415,68 @@ func GetFullNodeAPIV1(ctx *cli.Context, opts ...GetFullNodeOption) (v1api.FullNo
return &v1API, finalCloser, nil
}

func GetFullNodeAPIV1LotusProvider(ctx *cli.Context, ainfoCfg []string, opts ...GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) {
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
return tn.(v1api.FullNode), func() {}, nil
}

var options GetFullNodeOptions
for _, opt := range opts {
opt(&options)
}

var rpcOpts []jsonrpc.Option
if options.ethSubHandler != nil {
rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.ethSubHandler), jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription"))
}

heads, err := GetRawAPIMultiV2(ctx, ainfoCfg, "v1")
if err != nil {
return nil, nil, err
}

if IsVeryVerbose {
_, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", heads[0].addr)
}

var fullNodes []api.FullNode
var closers []jsonrpc.ClientCloser

for _, head := range heads {
v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...)
if err != nil {
log.Warnf("Not able to establish connection to node with addr: %s", head.addr)
continue
}
fullNodes = append(fullNodes, v1api)
closers = append(closers, closer)
}

// When running in cluster mode and trying to establish connections to multiple nodes, fail
// if less than 2 lotus nodes are actually running
if len(heads) > 1 && len(fullNodes) < 2 {
return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node")
}

finalCloser := func() {
for _, c := range closers {
c()
}
}

var v1API api.FullNodeStruct
FullNodeProxy(fullNodes, &v1API)

v, err := v1API.Version(ctx.Context)
if err != nil {
return nil, nil, err
}
if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) {
return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion)
}
return &v1API, finalCloser, nil
}
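A hypothetical usage sketch for the helper above (the command wiring and the FULLNODE_API_INFO source are assumptions; API-info entries use the usual lotus "TOKEN:/ip4/127.0.0.1/tcp/1234/http" form):

package main

import (
    "fmt"
    "os"

    ufcli "github.com/urfave/cli/v2"

    cliutil "github.com/filecoin-project/lotus/cli/util"
)

// chainHeadAction dials the configured full node(s) and prints the head epoch.
func chainHeadAction(cctx *ufcli.Context) error {
    ainfoCfg := []string{os.Getenv("FULLNODE_API_INFO")}

    full, closer, err := cliutil.GetFullNodeAPIV1LotusProvider(cctx, ainfoCfg)
    if err != nil {
        return err
    }
    defer closer()

    head, err := full.ChainHead(cctx.Context)
    if err != nil {
        return err
    }
    fmt.Println("head epoch:", head.Height())
    return nil
}

func main() {
    app := &ufcli.App{Name: "provider-sketch", Action: chainHeadAction}
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}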
type GetStorageMinerOptions struct {
PreferHttp bool
}
cli/util/eta.go (new file, 94 lines)
@ -0,0 +1,94 @@
package cliutil

import (
"fmt"
"math"
"time"
)

// ETA implements a very simple eta calculator based on the number of remaining items. It does not
// require knowing the work size in advance and is therefore suitable for streaming workloads and
// also does not require that consecutive updates have a monotonically decreasing remaining value.
type ETA struct {
// max number of items to keep in memory
maxItems int
// a queue of most recently updated items
items []item
// we store the last calculated ETA, which we reuse if there was no change in remaining items
lastETA string
}

type item struct {
timestamp time.Time
remaining int64
}

// NewETA creates a new ETA calculator of the given size
func NewETA(maxItems int) *ETA {
return &ETA{
maxItems: maxItems,
items: make([]item, 0),
}
}

// Update updates the ETA calculator with the remaining number of items and returns the ETA
func (e *ETA) Update(remaining int64) string {
item := item{
timestamp: time.Now(),
remaining: remaining,
}

if len(e.items) == 0 {
e.items = append(e.items, item)
return ""
}

if e.items[len(e.items)-1].remaining == remaining {
// we ignore updates with the same remaining value and just return the previous ETA
return e.lastETA
} else if e.items[len(e.items)-1].remaining < remaining {
// remaining went up from the previous update; let's estimate how many items were processed
// using the average number of processed items in the queue.
var avgProcessedPerItem int64 = 1
if len(e.items) > 1 {
diffRemaining := e.items[0].remaining - e.items[len(e.items)-1].remaining
avgProcessedPerItem = int64(math.Round(float64(diffRemaining) / float64(len(e.items))))
}

// diff is the increase in remaining since the last update plus the average number of
// items we estimate were processed this round
diff := remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem

// we update all items in the queue by shifting their remaining value accordingly. This means that we
// always have strictly decreasing remaining values in the queue
for i := range e.items {
e.items[i].remaining += diff
}
}

// append the item to the queue and remove the oldest item if needed
if len(e.items) >= e.maxItems {
e.items = e.items[1:]
}
e.items = append(e.items, item)

// calculate the average processing time per item in the queue
diffMs := e.items[len(e.items)-1].timestamp.Sub(e.items[0].timestamp).Milliseconds()
nrItemsProcessed := e.items[0].remaining - e.items[len(e.items)-1].remaining
avg := diffMs / nrItemsProcessed

// use that average processing time to estimate how long the remaining items will take
// and cache that ETA so we don't have to recalculate it on every update unless the
// remaining value changes
e.lastETA = msToETA(avg * remaining)

return e.lastETA
}

func msToETA(ms int64) string {
seconds := ms / 1000
minutes := seconds / 60
hours := minutes / 60

return fmt.Sprintf("%02dh:%02dm:%02ds", hours, minutes%60, seconds%60)
}
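A small usage sketch for the calculator (assuming the import path shown in the sync.go hunk above). The first Update returns the empty string because there is no history yet; after that, msToETA formats the projection, e.g. 3,725,000 ms is 3725 s, which renders as "01h:02m:05s":

package main

import (
    "fmt"
    "time"

    cliutil "github.com/filecoin-project/lotus/cli/util"
)

func main() {
    eta := cliutil.NewETA(10)

    // feed a shrinking remaining count, as SyncWait does with theight - ss.Height
    for remaining := int64(50); remaining >= 0; remaining -= 5 {
        fmt.Printf("remaining=%2d eta=%s\n", remaining, eta.Update(remaining))
        time.Sleep(100 * time.Millisecond)
    }
}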
@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/tablewriter"
)
@ -459,7 +460,12 @@ var walletSign = &cli.Command{
sig, err := api.WalletSign(ctx, addr, msg)

if err != nil {
return err
// Check if the address is a multisig address
act, actErr := api.StateGetActor(ctx, addr, types.EmptyTSK)
if actErr == nil && builtin.IsMultisigActor(act.Code) {
return xerrors.Errorf("specified signer address is a multisig actor, it doesn’t have keys to sign transactions. To send a message with a multisig, signers of the multisig need to propose and approve transactions.")
}
return xerrors.Errorf("failed to sign message: %w", err)
}

sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)
cmd/lotus-bench/cli.go (new file, 312 lines)
@ -0,0 +1,312 @@
package main

import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"sync"
"time"

"github.com/urfave/cli/v2"
)

var cliCmd = &cli.Command{
Name: "cli",
Usage: "Runs a concurrent stress test on one or more binary commands and prints the performance metrics including latency distribution and histogram",
Description: `This benchmark has the following features:
* Can query each command both sequentially and concurrently
* Supports rate limiting
* Can query multiple different commands at once (supporting different concurrency levels and rate limits for each command)
* Gives a nice reporting summary of the stress testing of each command (including latency distribution, histogram and more)
* Easy to use

To use this benchmark you must specify the commands you want to test using the --cmd option; its format is:

--cmd=CMD[:CONCURRENCY][:QPS] where only CMD is required.

Here are some real examples:
lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' // runs the command with default concurrency and qps
lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3' // override concurrency to 3
lotus-bench cli --cmd='lotus-shed mpool miner-select-messages::100' // override to 100 qps while using default concurrency
lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3:100' // run using 3 workers but limit to 100 qps
lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' --cmd='lotus sync wait' // run two commands at once
`,
Flags: []cli.Flag{
&cli.DurationFlag{
Name: "duration",
Value: 60 * time.Second,
Usage: "Duration of benchmark in seconds",
},
&cli.IntFlag{
Name: "concurrency",
Value: 10,
Usage: "How many workers should be used per command (can be overridden per command)",
},
&cli.IntFlag{
Name: "qps",
Value: 0,
Usage: "How many requests per second should be sent per command (can be overridden per command), a value of 0 means no limit",
},
&cli.StringSliceFlag{
Name: "cmd",
Usage: `Command to benchmark, you can specify multiple commands by repeating this flag. You can also specify command specific options to set the concurrency and qps for each command (see usage).`,
},
&cli.DurationFlag{
Name: "watch",
Value: 0 * time.Second,
Usage: "If >0 then generates reports every N seconds (only supports linux/unix)",
},
&cli.BoolFlag{
Name: "print-response",
Value: false,
Usage: "print the response of each request",
},
},
Action: func(cctx *cli.Context) error {
if len(cctx.StringSlice("cmd")) == 0 {
return errors.New("you must specify at least one cmd to benchmark")
}

var cmds []*CMD
for _, str := range cctx.StringSlice("cmd") {
entries := strings.SplitN(str, ":", 3)
if len(entries) == 0 {
return errors.New("invalid cmd format")
}

// check if concurrency was specified
concurrency := cctx.Int("concurrency")
if len(entries) > 1 {
if len(entries[1]) > 0 {
var err error
concurrency, err = strconv.Atoi(entries[1])
if err != nil {
return fmt.Errorf("could not parse concurrency value from command %s: %v", entries[0], err)
}
}
}

// check if qps was specified
qps := cctx.Int("qps")
if len(entries) > 2 {
if len(entries[2]) > 0 {
var err error
qps, err = strconv.Atoi(entries[2])
if err != nil {
return fmt.Errorf("could not parse qps value from command %s: %v", entries[0], err)
}
}
}

cmds = append(cmds, &CMD{
w: os.Stdout,
cmd: entries[0],
concurrency: concurrency,
qps: qps,
printResp: cctx.Bool("print-response"),
})
}

// terminate early on ctrl+c
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
fmt.Println("Received interrupt, stopping...")
for _, cmd := range cmds {
cmd.Stop()
}
}()

// stop all threads after duration
go func() {
time.Sleep(cctx.Duration("duration"))
for _, cmd := range cmds {
cmd.Stop()
}
}()

// start all threads
var wg sync.WaitGroup
wg.Add(len(cmds))

for _, cmd := range cmds {
go func(cmd *CMD) {
defer wg.Done()
err := cmd.Run()
if err != nil {
fmt.Printf("error running cmd: %v\n", err)
}
}(cmd)
}

// if watch is set then print a report every N seconds
var progressCh chan struct{}
if cctx.Duration("watch") > 0 {
progressCh = make(chan struct{}, 1)
go func(progressCh chan struct{}) {
ticker := time.NewTicker(cctx.Duration("watch"))
for {
clearAndPrintReport := func() {
// clear the screen and move the cursor to the top left
fmt.Print("\033[2J")
fmt.Printf("\033[%d;%dH", 1, 1)
for i, cmd := range cmds {
cmd.Report()
if i < len(cmds)-1 {
fmt.Println()
}
}
}
select {
case <-ticker.C:
clearAndPrintReport()
case <-progressCh:
clearAndPrintReport()
return
}
}
}(progressCh)
}

wg.Wait()

if progressCh != nil {
// wait for the watch go routine to return
progressCh <- struct{}{}

// no need to print the report again
return nil
}

// print the report for each command
for i, cmd := range cmds {
cmd.Report()
if i < len(cmds)-1 {
fmt.Println()
}
}

return nil
},
}
// CMD handles the benchmarking of a single command.
type CMD struct {
w io.Writer
// the cmd we want to benchmark
cmd string
// the number of concurrent requests to make to this command
concurrency int
// if >0 then qps is the max number of requests per second to make to this command (0 = no limit)
qps int
// whether or not to print the response of each request (useful for debugging)
printResp bool
// instruct the worker go routines to stop
stopCh chan struct{}
// when the command benchmarking started
start time.Time
// results channel is used by the workers to send results to the reporter
results chan *result
// reporter handles reading the results from workers and printing the report statistics
reporter *Reporter
}

func (c *CMD) Run() error {
var wg sync.WaitGroup
wg.Add(c.concurrency)

c.results = make(chan *result, c.concurrency*1_000)
c.stopCh = make(chan struct{}, c.concurrency)

go func() {
c.reporter = NewReporter(c.results, c.w)
c.reporter.Run()
}()

c.start = time.Now()

// throttle the number of requests per second
var qpsTicker *time.Ticker
if c.qps > 0 {
qpsTicker = time.NewTicker(time.Second / time.Duration(c.qps))
}

for i := 0; i < c.concurrency; i++ {
go func() {
c.startWorker(qpsTicker)
wg.Done()
}()
}
wg.Wait()

// close the results channel so reporter will stop
close(c.results)

// wait until the reporter is done
<-c.reporter.doneCh

return nil
}

func (c *CMD) startWorker(qpsTicker *time.Ticker) {
for {
// check if we should stop
select {
case <-c.stopCh:
return
default:
}

// wait for the next tick if we are rate limiting this command
if qpsTicker != nil {
<-qpsTicker.C
}

start := time.Now()

var statusCode int = 0

arr := strings.Fields(c.cmd)

data, err := exec.Command(arr[0], arr[1:]...).Output()
if err != nil {
fmt.Println("1")
if exitError, ok := err.(*exec.ExitError); ok {
statusCode = exitError.ExitCode()
} else {
statusCode = 1
}
} else {
if c.printResp {
fmt.Printf("[%s] %s", c.cmd, string(data))
}
}

c.results <- &result{
statusCode: &statusCode,
err: err,
duration: time.Since(start),
}
}
}
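The ticker set up in Run above is the entire rate limiter: a single ticker shared by all workers fires every 1/qps seconds, and each worker blocks on it before issuing a request, so the aggregate rate stays near qps regardless of concurrency. A standalone sketch of the pattern (the qps value and loop count are arbitrary):

package main

import (
    "fmt"
    "time"
)

func main() {
    qps := 5
    ticker := time.NewTicker(time.Second / time.Duration(qps))
    defer ticker.Stop()

    start := time.Now()
    for i := 0; i < 10; i++ {
        <-ticker.C // each "request" waits for its slot, ~200ms apart at 5 qps
        fmt.Printf("request %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
    }
}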
func (c *CMD) Stop() {
for i := 0; i < c.concurrency; i++ {
c.stopCh <- struct{}{}
}
}

func (c *CMD) Report() {
total := time.Since(c.start)
fmt.Fprintf(c.w, "[%s]:\n", c.cmd)
fmt.Fprintf(c.w, "- Options:\n")
fmt.Fprintf(c.w, " - concurrency: %d\n", c.concurrency)
fmt.Fprintf(c.w, " - qps: %d\n", c.qps)
c.reporter.Print(total, c.w)
}
@ -304,7 +304,7 @@ var importBenchCmd = &cli.Command{
return fmt.Errorf("no CAR file provided for import")
}

head, err = cs.Import(cctx.Context, carFile)
head, _, err = cs.Import(cctx.Context, carFile)
if err != nil {
return err
}
@ -497,21 +497,6 @@ type Invocation struct {

const GasPerNs = 10

func countGasCosts(et *types.ExecutionTrace) int64 {
var cgas int64

for _, gc := range et.GasCharges {
cgas += gc.ComputeGas
}

for _, sub := range et.Subcalls {
c := countGasCosts(&sub) //nolint
cgas += c
}

return cgas
}

type stats struct {
timeTaken meanVar
gasRatio meanVar
@ -3,10 +3,10 @@ package main
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"math/rand"
"os"
"path/filepath"
"sync"
@ -120,6 +120,7 @@ func main() {
sealBenchCmd,
simpleCmd,
importBenchCmd,
cliCmd,
rpcCmd,
},
}
@ -546,7 +547,10 @@ var sealBenchCmd = &cli.Command{
}

var challenge [32]byte
rand.Read(challenge[:])
_, err = rand.Read(challenge[:])
if err != nil {
return err
}

beforePost := time.Now()

@ -776,9 +780,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
start := time.Now()
log.Infof("[%d] Writing piece into sector...", i)

r := rand.New(rand.NewSource(100 + int64(i)))

pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), rand.Reader)
if err != nil {
return nil, nil, err
}
cmd/lotus-bench/reporter.go (new file, 181 lines)
@ -0,0 +1,181 @@
package main

import (
"fmt"
"io"
"sort"
"strings"
"sync"
"text/tabwriter"
"time"
)

// result is the result of a single rpc method request.
type result struct {
err error
statusCode *int
duration time.Duration
}

// Reporter reads the results from the workers through the results channel and aggregates the results.
type Reporter struct {
// write the report to this writer
w io.Writer
// the reporter reads the results from this channel
results chan *result
// doneCh is used to signal that the reporter has finished reading the results (channel has closed)
doneCh chan bool

// lock protects the following fields during critical sections (if --watch was specified)
lock sync.Mutex
// the latencies of all requests
latencies []int64
// the number of requests that returned each status code
statusCodes map[int]int
// the number of errors that occurred
errors map[string]int
}

func NewReporter(results chan *result, w io.Writer) *Reporter {
return &Reporter{
w: w,
results: results,
doneCh: make(chan bool, 1),
statusCodes: make(map[int]int),
errors: make(map[string]int),
}
}

func (r *Reporter) Run() {
for res := range r.results {
r.lock.Lock()

r.latencies = append(r.latencies, res.duration.Milliseconds())

if res.statusCode != nil {
r.statusCodes[*res.statusCode]++
}

if res.err != nil {
if len(r.errors) < 1_000_000 {
r.errors[res.err.Error()]++
} else {
// we don't want to store too many errors in memory
r.errors["hidden"]++
}
} else {
r.errors["nil"]++
}

r.lock.Unlock()
}

r.doneCh <- true
}

func (r *Reporter) Print(elapsed time.Duration, w io.Writer) {
r.lock.Lock()
defer r.lock.Unlock()

nrReq := int64(len(r.latencies))
if nrReq == 0 {
fmt.Println("No requests were made")
return
}

// we need to sort the latencies slice to calculate the percentiles
sort.Slice(r.latencies, func(i, j int) bool {
return r.latencies[i] < r.latencies[j]
})

var totalLatency int64 = 0
for _, latency := range r.latencies {
totalLatency += latency
}

fmt.Fprintf(w, "- Total Requests: %d\n", nrReq)
fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds())
fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds())
fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq)
fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2])
fmt.Fprintf(w, "- Latency distribution:\n")
percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999}
for _, p := range percentiles {
idx := int64(p * float64(nrReq))
fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx])
}

// create a simple histogram with 10 buckets splitting the latency range into equal ranges
nrBucket := 10
buckets := make([]Bucket, nrBucket)
latencyRange := r.latencies[len(r.latencies)-1]
bucketRange := latencyRange / int64(nrBucket)

// mark the end of each bucket
for i := 0; i < nrBucket; i++ {
buckets[i].start = int64(i) * bucketRange
buckets[i].end = buckets[i].start + bucketRange
// extend the last bucket by any remaining range caused by the integer division
if i == nrBucket-1 {
buckets[i].end = latencyRange
}
}

// count the number of requests in each bucket
currBucket := 0
for i := 0; i < len(r.latencies); {
if r.latencies[i] <= buckets[currBucket].end {
buckets[currBucket].cnt++
i++
} else {
currBucket++
}
}

// print the histogram using a tabwriter which will align the columns nicely
fmt.Fprintf(w, "- Histogram:\n")
const padding = 2
tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)
for i := 0; i < nrBucket; i++ {
ratio := float64(buckets[i].cnt) / float64(nrReq)
bars := strings.Repeat("#", int(ratio*100))
fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100))
}
tabWriter.Flush() //nolint:errcheck

fmt.Fprintf(w, "- Status codes:\n")
for code, cnt := range r.statusCodes {
fmt.Fprintf(w, " [%d]: %d\n", code, cnt)
}

// print the 10 most occurring errors (in case error values are not unique)
type kv struct {
err string
cnt int
}
var sortedErrors []kv
for err, cnt := range r.errors {
sortedErrors = append(sortedErrors, kv{err, cnt})
}
sort.Slice(sortedErrors, func(i, j int) bool {
return sortedErrors[i].cnt > sortedErrors[j].cnt
})
fmt.Fprintf(w, "- Errors (top 10):\n")
for i, se := range sortedErrors {
if i > 10 {
break
}
fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt)
}
}

type Bucket struct {
start int64
// the end value of the bucket
end int64
// how many entries are in the bucket
cnt int
}
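A note on the percentile lookup in Print above: it indexes the sorted latency slice at int64(p * n), so with n=8 samples, p=0.5 selects index 4 (the fifth-smallest value). A self-contained sketch of that lookup (the sample latencies are invented, and the bounds guard is an extra safety not present in the code above):

package main

import (
    "fmt"
    "sort"
)

func main() {
    latencies := []int64{12, 7, 31, 9, 15, 22, 5, 18}
    sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })

    for _, p := range []float64{0.5, 0.9, 0.99} {
        idx := int64(p * float64(len(latencies)))
        if idx >= int64(len(latencies)) { // guard against indexing past the end
            idx = int64(len(latencies)) - 1
        }
        fmt.Printf("p%g = %dms\n", p*100, latencies[idx])
    }
}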
@ -9,11 +9,9 @@ import (
"net/http"
"os"
"os/signal"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"

"github.com/urfave/cli/v2"
@ -243,13 +241,6 @@ type RPCMethod struct {
reporter *Reporter
}

// result is the result of a single rpc method request.
type result struct {
err error
statusCode *int
duration time.Duration
}

func (rpc *RPCMethod) Run() error {
client := &http.Client{
Timeout: 0,
@ -411,166 +402,3 @@ func (rpc *RPCMethod) Report() {
fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps)
rpc.reporter.Print(total, rpc.w)
}

// Reporter reads the results from the workers through the results channel and aggregates the results.
type Reporter struct {
// write the report to this writer
w io.Writer
// the reporter reads the results from this channel
results chan *result
// doneCh is used to signal that the reporter has finished reading the results (channel has closed)
doneCh chan bool

// lock protects the following fields during critical sections (if --watch was specified)
lock sync.Mutex
// the latencies of all requests
latencies []int64
// the number of requests that returned each status code
statusCodes map[int]int
// the number of errors that occurred
errors map[string]int
}

func NewReporter(results chan *result, w io.Writer) *Reporter {
return &Reporter{
w: w,
results: results,
doneCh: make(chan bool, 1),
statusCodes: make(map[int]int),
errors: make(map[string]int),
}
}

func (r *Reporter) Run() {
for res := range r.results {
r.lock.Lock()

r.latencies = append(r.latencies, res.duration.Milliseconds())

if res.statusCode != nil {
r.statusCodes[*res.statusCode]++
}

if res.err != nil {
if len(r.errors) < 1_000_000 {
r.errors[res.err.Error()]++
} else {
// we don't want to store too many errors in memory
r.errors["hidden"]++
}
} else {
r.errors["nil"]++
}

r.lock.Unlock()
}

r.doneCh <- true
}

func (r *Reporter) Print(elapsed time.Duration, w io.Writer) {
r.lock.Lock()
defer r.lock.Unlock()

nrReq := int64(len(r.latencies))
if nrReq == 0 {
fmt.Println("No requests were made")
return
}

// we need to sort the latencies slice to calculate the percentiles
sort.Slice(r.latencies, func(i, j int) bool {
return r.latencies[i] < r.latencies[j]
})

var totalLatency int64 = 0
for _, latency := range r.latencies {
totalLatency += latency
}

fmt.Fprintf(w, "- Total Requests: %d\n", nrReq)
fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds())
fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds())
fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq)
fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2])
fmt.Fprintf(w, "- Latency distribution:\n")
percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999}
for _, p := range percentiles {
idx := int64(p * float64(nrReq))
fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx])
}

// create a simple histogram with 10 buckets splitting the latency range into equal ranges
nrBucket := 10
buckets := make([]Bucket, nrBucket)
latencyRange := r.latencies[len(r.latencies)-1]
bucketRange := latencyRange / int64(nrBucket)

// mark the end of each bucket
for i := 0; i < nrBucket; i++ {
buckets[i].start = int64(i) * bucketRange
buckets[i].end = buckets[i].start + bucketRange
// extend the last bucket by any remaining range caused by the integer division
if i == nrBucket-1 {
buckets[i].end = latencyRange
}
}

// count the number of requests in each bucket
currBucket := 0
for i := 0; i < len(r.latencies); {
if r.latencies[i] <= buckets[currBucket].end {
buckets[currBucket].cnt++
i++
} else {
currBucket++
}
}

// print the histogram using a tabwriter which will align the columns nicely
fmt.Fprintf(w, "- Histogram:\n")
const padding = 2
tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)
for i := 0; i < nrBucket; i++ {
ratio := float64(buckets[i].cnt) / float64(nrReq)
bars := strings.Repeat("#", int(ratio*100))
fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100))
}
tabWriter.Flush() //nolint:errcheck

fmt.Fprintf(w, "- Status codes:\n")
for code, cnt := range r.statusCodes {
fmt.Fprintf(w, " [%d]: %d\n", code, cnt)
}

// print the 10 most occurring errors (in case error values are not unique)
type kv struct {
err string
cnt int
}
var sortedErrors []kv
for err, cnt := range r.errors {
sortedErrors = append(sortedErrors, kv{err, cnt})
}
sort.Slice(sortedErrors, func(i, j int) bool {
return sortedErrors[i].cnt > sortedErrors[j].cnt
})
fmt.Fprintf(w, "- Errors (top 10):\n")
for i, se := range sortedErrors {
if i > 10 {
break
}
fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt)
}
}

type Bucket struct {
start int64
// the end value of the bucket
end int64
// how many entries are in the bucket
cnt int
}
@ -266,7 +266,10 @@ var simplePreCommit1 = &cli.Command{
ProofType: spt(sectorSize, cctx.Bool("synthetic")),
}

var ticket [32]byte // all zero
ticket := [32]byte{}
for i := range ticket {
ticket[i] = 1
}

pieces, err := ParsePieceInfos(cctx, 3)
if err != nil {
@ -305,7 +308,36 @@ var simplePreCommit2 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.StringFlag{
Name: "external-pc2",
Usage: "command for computing PC2 externally",
},
},
Description: `Compute PreCommit2 inputs and seal a sector.

--external-pc2 can be used to compute the PreCommit2 inputs externally.
The flag behaves similarly to the related lotus-worker flag, using it in
lotus-bench may be useful for testing if the external PreCommit2 command is
invoked correctly.

The command will be called with a number of environment variables set:
* EXTSEAL_PC2_SECTOR_NUM: the sector number
* EXTSEAL_PC2_SECTOR_MINER: the miner id
* EXTSEAL_PC2_PROOF_TYPE: the proof type
* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes
* EXTSEAL_PC2_CACHE: the path to the cache directory
* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller)
* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json)

The command is expected to:
* Create cache sc-02-data-tree-r* files
* Create cache sc-02-data-tree-c* files
* Create cache p_aux / t_aux files
* Transform the sealed file in place

Example invocation of lotus-bench as external executor:
'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT'
`,
ArgsUsage: "[sealed] [cache] [pc1 out]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context
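A hypothetical stub for the external PC2 executable described above, showing only how such a command might read the documented EXTSEAL_PC2_* variables and decode the base64 PC1 output; the actual tree-r/tree-c/p_aux work it must do is out of scope here:

package main

import (
    "encoding/base64"
    "fmt"
    "os"
)

func main() {
    sealed := os.Getenv("EXTSEAL_PC2_SEALED")
    cache := os.Getenv("EXTSEAL_PC2_CACHE")

    pc1out, err := base64.StdEncoding.DecodeString(os.Getenv("EXTSEAL_PC2_PC1OUT"))
    if err != nil {
        fmt.Fprintln(os.Stderr, "bad EXTSEAL_PC2_PC1OUT:", err)
        os.Exit(1)
    }

    // a real implementation would now build the cache tree files and
    // transform the sealed file in place, as the description requires
    fmt.Printf("sealed=%s cache=%s pc1out=%d bytes\n", sealed, cache, len(pc1out))
}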
@ -330,7 +362,18 @@ var simplePreCommit2 = &cli.Command{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
}
sealer, err := ffiwrapper.New(pp)

var opts []ffiwrapper.FFIWrapperOpt

if cctx.IsSet("external-pc2") {
extSeal := ffiwrapper.ExternalSealer{
PreCommit2: ffiwrapper.MakeExternPrecommit2(cctx.String("external-pc2")),
}

opts = append(opts, ffiwrapper.WithExternalSealCalls(extSeal))
}

sealer, err := ffiwrapper.New(pp, opts...)
if err != nil {
return err
}
@ -420,7 +463,12 @@ var simpleCommit1 = &cli.Command{

start := time.Now()

var ticket, seed [32]byte // all zero
ticket := [32]byte{}
seed := [32]byte{}
for i := range ticket {
ticket[i] = 1
seed[i] = 1
}

commd, err := cid.Parse(cctx.Args().Get(2))
if err != nil {
@ -650,6 +698,10 @@ var simpleWinningPost = &cli.Command{
Usage: "pass miner address (only necessary if using existing sectorbuilder)",
Value: "t01000",
},
&cli.BoolFlag{
Name: "show-inputs",
Usage: "output inputs for winning post generation",
},
},
ArgsUsage: "[sealed] [cache] [comm R] [sector num]",
Action: func(cctx *cli.Context) error {
@ -720,6 +772,17 @@ var simpleWinningPost = &cli.Command{
fmt.Printf("Vanilla %s (%s)\n", challenge.Sub(start), bps(sectorSize, 1, challenge.Sub(start)))
fmt.Printf("Proof %s (%s)\n", end.Sub(challenge), bps(sectorSize, 1, end.Sub(challenge)))
fmt.Println(base64.StdEncoding.EncodeToString(proof[0].ProofBytes))

if cctx.Bool("show-inputs") {
fmt.Println("GenerateWinningPoStWithVanilla info:")

fmt.Printf(" wpt: %d\n", wpt)
fmt.Printf(" mid: %d\n", mid)
fmt.Printf(" rand: %x\n", rand)
fmt.Printf(" vp: %x\n", vp)
fmt.Printf(" proof: %x\n", proof)
}

return nil
},
}
@ -143,7 +143,7 @@ var runCmd = &cli.Command{
},
&cli.DurationFlag{
Name: "rate-limit-timeout",
Usage: "the maximum time to wait for the rate limter before returning an error to clients",
Usage: "the maximum time to wait for the rate limiter before returning an error to clients",
Value: gateway.DefaultRateLimitTimeout,
},
&cli.Int64Flag{
@ -463,7 +463,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))

si := paths.NewIndex(nil)
si := paths.NewMemIndex(nil)

lstor, err := paths.NewLocal(ctx, lr, si, nil)
if err != nil {