Merge pull request #11109 from filecoin-project/release/v1.23.3
release: v1.23.3
This commit is contained in:
commit
7bb1f98ac6
@ -7,12 +7,12 @@ executors:
|
|||||||
golang:
|
golang:
|
||||||
docker:
|
docker:
|
||||||
# Must match GO_VERSION_MIN in project root
|
# Must match GO_VERSION_MIN in project root
|
||||||
- image: cimg/go:1.19.7
|
- image: cimg/go:1.19.12
|
||||||
resource_class: medium+
|
resource_class: medium+
|
||||||
golang-2xl:
|
golang-2xl:
|
||||||
docker:
|
docker:
|
||||||
# Must match GO_VERSION_MIN in project root
|
# Must match GO_VERSION_MIN in project root
|
||||||
- image: cimg/go:1.19.7
|
- image: cimg/go:1.19.12
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
ubuntu:
|
ubuntu:
|
||||||
docker:
|
docker:
|
||||||
|
@ -7,12 +7,12 @@ executors:
|
|||||||
golang:
|
golang:
|
||||||
docker:
|
docker:
|
||||||
# Must match GO_VERSION_MIN in project root
|
# Must match GO_VERSION_MIN in project root
|
||||||
- image: cimg/go:1.19.7
|
- image: cimg/go:1.19.12
|
||||||
resource_class: medium+
|
resource_class: medium+
|
||||||
golang-2xl:
|
golang-2xl:
|
||||||
docker:
|
docker:
|
||||||
# Must match GO_VERSION_MIN in project root
|
# Must match GO_VERSION_MIN in project root
|
||||||
- image: cimg/go:1.19.7
|
- image: cimg/go:1.19.12
|
||||||
resource_class: 2xlarge
|
resource_class: 2xlarge
|
||||||
ubuntu:
|
ubuntu:
|
||||||
docker:
|
docker:
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -52,3 +52,4 @@ dist/
|
|||||||
# The following files are checked into git and result
|
# The following files are checked into git and result
|
||||||
# in dirty git state if removed from the docker context
|
# in dirty git state if removed from the docker context
|
||||||
!extern/filecoin-ffi/rust/filecoin.pc
|
!extern/filecoin-ffi/rust/filecoin.pc
|
||||||
|
!extern/test-vectors
|
||||||
|
@ -12,7 +12,6 @@ linters:
|
|||||||
- unconvert
|
- unconvert
|
||||||
- staticcheck
|
- staticcheck
|
||||||
- varcheck
|
- varcheck
|
||||||
- structcheck
|
|
||||||
- deadcode
|
- deadcode
|
||||||
- scopelint
|
- scopelint
|
||||||
|
|
||||||
|
106
CHANGELOG.md
106
CHANGELOG.md
@ -1,5 +1,111 @@
|
|||||||
# Lotus changelog
|
# Lotus changelog
|
||||||
|
|
||||||
|
# UNRELEASED
|
||||||
|
|
||||||
|
# v1.23.3 / 2023-08-01
|
||||||
|
|
||||||
|
This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
|
||||||
|
|
||||||
|
This feature release requires a **minimum Go version of v1.19.12 or higher to successfully build Lotus**. Go version 1.20 is also supported, but 1.21 is NOT.
|
||||||
|
|
||||||
|
## Highlights
|
||||||
|
|
||||||
|
- [Lotus now includes a Slasher tool](https://github.com/filecoin-project/lotus/pull/10928) to monitor the network for Consensus Faults, and report them as appropriate
|
||||||
|
- The Slasher investigates all incoming blocks, and assesses whether they trigger any of the three Consensus Faults defined in the Filecoin protocol
|
||||||
|
- If any faults are detected, the Slasher sends a `ReportConsensusFault` message to the faulty miner
|
||||||
|
- For more information on the Slasher, including how to run it, please find the documentation [here](https://lotus.filecoin.io/lotus/manage/slasher-and-disputer/)
|
||||||
|
- The Ethereum-like RPC exposed by Lotus is now compatible with EIP-1898: https://github.com/filecoin-project/lotus/pull/10815
|
||||||
|
- The lotus-miner PieceReader now supports parallel reads: https://github.com/filecoin-project/lotus/pull/10913
|
||||||
|
- Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
|
||||||
|
- If unset, we default to caching 16 most recent execution traces. Storage Providers may want to set this to 0, while exchanges may want to crank it up.
|
||||||
|
|
||||||
|
## New features
|
||||||
|
- feat: miner cli: sectors list upgrade-bounds tool ([filecoin-project/lotus#10923](https://github.com/filecoin-project/lotus/pull/10923))
|
||||||
|
- Add new RPC stress testing tool (lotus-bench rpc) with rich reporting ([filecoin-project/lotus#10761](https://github.com/filecoin-project/lotus/pull/10761))
|
||||||
|
- feat: alert: Add FVM_CONCURRENCY alert ([filecoin-project/lotus#10933](https://github.com/filecoin-project/lotus/pull/10933))
|
||||||
|
- feat: Add eth_syncing RPC method ([filecoin-project/lotus#10719](https://github.com/filecoin-project/lotus/pull/10719))
|
||||||
|
- feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797))
|
||||||
|
- feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929))
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
- chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040))
|
||||||
|
- feat: Make sure we don't store duplicate actor events caused by reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015))
|
||||||
|
- sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002))
|
||||||
|
- chore: not display privatekey ([filecoin-project/lotus#11006](https://github.com/filecoin-project/lotus/pull/11006))
|
||||||
|
- chore: shed: update actor version ([filecoin-project/lotus#11020](https://github.com/filecoin-project/lotus/pull/11020))
|
||||||
|
- chore: migrate to boxo ([filecoin-project/lotus#10921](https://github.com/filecoin-project/lotus/pull/10921))
|
||||||
|
- feat: deflake TestDealsWithFinalizeEarly ([filecoin-project/lotus#10978](https://github.com/filecoin-project/lotus/pull/10978))
|
||||||
|
- fix: pubsub: do not treat ErrExistingNonce as Reject ([filecoin-project/lotus#10973](https://github.com/filecoin-project/lotus/pull/10973))
|
||||||
|
- feat: deflake TestDMLevelPartialRetrieval (#10972) ([filecoin-project/lotus#10972](https://github.com/filecoin-project/lotus/pull/10972))
|
||||||
|
- fix: eth: ensure that the event topics are non-nil ([filecoin-project/lotus#10971](https://github.com/filecoin-project/lotus/pull/10971))
|
||||||
|
- Add comment stating msgIndex is an experimental feature ([filecoin-project/lotus#10968](https://github.com/filecoin-project/lotus/pull/10968))
|
||||||
|
- feat: cli(compute-state) default to the tipset at the given epoch ([filecoin-project/lotus#10965](https://github.com/filecoin-project/lotus/pull/10965))
|
||||||
|
- Upgrade urfave dependency which now supports DisableSliceFlagSeparator ([filecoin-project/lotus#10950](https://github.com/filecoin-project/lotus/pull/10950))
|
||||||
|
- Add new lotus-shed command for computing eth hash for a given message cid (#10961) ([filecoin-project/lotus#10961](https://github.com/filecoin-project/lotus/pull/10961))
|
||||||
|
- Prefill GetTipsetByHeight skiplist cache on lotus startup ([filecoin-project/lotus#10955](https://github.com/filecoin-project/lotus/pull/10955))
|
||||||
|
- Add lotus-shed command for backfilling txhash.db ([filecoin-project/lotus#10932](https://github.com/filecoin-project/lotus/pull/10932))
|
||||||
|
- chore: deps: update to go-libp2p 0.27.5 ([filecoin-project/lotus#10948](https://github.com/filecoin-project/lotus/pull/10948))
|
||||||
|
- Small improvement to make gen output ([filecoin-project/lotus#10951](https://github.com/filecoin-project/lotus/pull/10951))
|
||||||
|
- fix: improve perf of msgindex backfill ([filecoin-project/lotus#10941](https://github.com/filecoin-project/lotus/pull/10941))
|
||||||
|
- deps: update libp2p ([filecoin-project/lotus#10936](https://github.com/filecoin-project/lotus/pull/10936))
|
||||||
|
- sealing: Improve upgrade sector selection ([filecoin-project/lotus#10915](https://github.com/filecoin-project/lotus/pull/10915))
|
||||||
|
- Add timing test for mpool select with a large mpool dump ([filecoin-project/lotus#10650](https://github.com/filecoin-project/lotus/pull/10650))
|
||||||
|
- feat: slashfilter: drop outdated near-upgrade check ([filecoin-project/lotus#10925](https://github.com/filecoin-project/lotus/pull/10925))
|
||||||
|
- opt: MinerInfo adds the PendingOwnerAddress field ([filecoin-project/lotus#10927](https://github.com/filecoin-project/lotus/pull/10927))
|
||||||
|
- feat: itest: force PoSt more aggressively around deadline closure ([filecoin-project/lotus#10926](https://github.com/filecoin-project/lotus/pull/10926))
|
||||||
|
- test: messagepool: gas rewards are negative if GasFeeCap too low ([filecoin-project/lotus#10649](https://github.com/filecoin-project/lotus/pull/10649))
|
||||||
|
- fix: types: error out on decoding BlockMsg with extraneous data ([filecoin-project/lotus#10863](https://github.com/filecoin-project/lotus/pull/10863))
|
||||||
|
- update interop upgrade schedule ([filecoin-project/lotus#10879](https://github.com/filecoin-project/lotus/pull/10879))
|
||||||
|
- itests: Test PoSt V1_1 on workers ([filecoin-project/lotus#10732](https://github.com/filecoin-project/lotus/pull/10732))
|
||||||
|
- Update gas_balancing.md ([filecoin-project/lotus#10924](https://github.com/filecoin-project/lotus/pull/10924))
|
||||||
|
- feat: cli: Make compact partitions cmd better ([filecoin-project/lotus#9070](https://github.com/filecoin-project/lotus/pull/9070))
|
||||||
|
- fix: include extra messages in ComputeState InvocResult output ([filecoin-project/lotus#10628](https://github.com/filecoin-project/lotus/pull/10628))
|
||||||
|
- feat: pubsub: treat ErrGasFeeCapTooLow as ignore, not reject ([filecoin-project/lotus#10652](https://github.com/filecoin-project/lotus/pull/10652))
|
||||||
|
- feat: run lotus-shed commands in context that is cancelled on sigterm ([filecoin-project/lotus#10877](https://github.com/filecoin-project/lotus/pull/10877))
|
||||||
|
- fix:lotus-fountain:set default data-cap same as MinVerifiedDealSize ([filecoin-project/lotus#10920](https://github.com/filecoin-project/lotus/pull/10920))
|
||||||
|
- pass the right g-recaptcha data
|
||||||
|
- fix: not call RUnlock ([filecoin-project/lotus#10912](https://github.com/filecoin-project/lotus/pull/10912))
|
||||||
|
- opt: cli: If present, print Events Root ([filecoin-project/lotus#10893](https://github.com/filecoin-project/lotus/pull/10893))
|
||||||
|
- Calibration faucet UI improvements ([filecoin-project/lotus#10905](https://github.com/filecoin-project/lotus/pull/10905))
|
||||||
|
- chore: chain: replace storetheindex with go-libipni ([filecoin-project/lotus#10841](https://github.com/filecoin-project/lotus/pull/10841))
|
||||||
|
- Add alerts to `Lotus info` cmd ([filecoin-project/lotus#10894](https://github.com/filecoin-project/lotus/pull/10894))
|
||||||
|
- fix: cli: make redeclare cmd work properly ([filecoin-project/lotus#10860](https://github.com/filecoin-project/lotus/pull/10860))
|
||||||
|
- fix: shed remove datacap not working with ledger ([filecoin-project/lotus#10880](https://github.com/filecoin-project/lotus/pull/10880))
|
||||||
|
- Check if epoch is negative in GetTipsetByHeight ([filecoin-project/lotus#10878](https://github.com/filecoin-project/lotus/pull/10878))
|
||||||
|
- chore: update go-fil-markets ([filecoin-project/lotus#10867](https://github.com/filecoin-project/lotus/pull/10867))
|
||||||
|
- feat: alerts: Add lotus-miner legacy-markets alert ([filecoin-project/lotus#10868](https://github.com/filecoin-project/lotus/pull/10868))
|
||||||
|
- feat:fountain:add grant-datacap support ([filecoin-project/lotus#10856](https://github.com/filecoin-project/lotus/pull/10856))
|
||||||
|
- feat: itests: add logs to blockminer.go failure case ([filecoin-project/lotus#10861](https://github.com/filecoin-project/lotus/pull/10861))
|
||||||
|
- feat: eth: Add support for blockHash param in eth_getLogs ([filecoin-project/lotus#10782](https://github.com/filecoin-project/lotus/pull/10782))
|
||||||
|
- lotus-fountain: make compatible with 0x addresses #10560 ([filecoin-project/lotus#10784](https://github.com/filecoin-project/lotus/pull/10784))
|
||||||
|
- feat: deflake sector_import_simple ([filecoin-project/lotus#10858](https://github.com/filecoin-project/lotus/pull/10858))
|
||||||
|
- fix: splitstore: remove deadlock around waiting for sync ([filecoin-project/lotus#10857](https://github.com/filecoin-project/lotus/pull/10857))
|
||||||
|
- fix: sched: Address GET_32G_MAX_CONCURRENT regression (#10850) ([filecoin-project/lotus#10850](https://github.com/filecoin-project/lotus/pull/10850))
|
||||||
|
- feat: fix deadlock in splitstore-mpool interaction ([filecoin-project/lotus#10840](https://github.com/filecoin-project/lotus/pull/10840))
|
||||||
|
- chore: update go-libp2p to v0.27.3 ([filecoin-project/lotus#10671](https://github.com/filecoin-project/lotus/pull/10671))
|
||||||
|
- libp2p: add QUIC and WebTransport to default listen addresses ([filecoin-project/lotus#10848](https://github.com/filecoin-project/lotus/pull/10848))
|
||||||
|
- fix: ci: Debugging m1 build ([filecoin-project/lotus#10749](https://github.com/filecoin-project/lotus/pull/10749))
|
||||||
|
- Validate that FromBlock/ToBlock epoch is indeed a hex value (#10780) ([filecoin-project/lotus#10780](https://github.com/filecoin-project/lotus/pull/10780))
|
||||||
|
- fix: remove invalid field UpgradePriceListOopsHeight ([filecoin-project/lotus#10772](https://github.com/filecoin-project/lotus/pull/10772))
|
||||||
|
- feat: deflake eth_balance_test ([filecoin-project/lotus#10847](https://github.com/filecoin-project/lotus/pull/10847))
|
||||||
|
- fix: tests: Use mutex-wrapped datastore in storage tests ([filecoin-project/lotus#10846](https://github.com/filecoin-project/lotus/pull/10846))
|
||||||
|
- Make lotus-fountain UI slightly friendlier ([filecoin-project/lotus#10785](https://github.com/filecoin-project/lotus/pull/10785))
|
||||||
|
- Make (un)subscribe and filter RPC methods require only read perm ([filecoin-project/lotus#10825](https://github.com/filecoin-project/lotus/pull/10825))
|
||||||
|
- deps: Update go-jsonrpc to v0.3.1 ([filecoin-project/lotus#10845](https://github.com/filecoin-project/lotus/pull/10845))
|
||||||
|
- feat: deflake paych_api_test ([filecoin-project/lotus#10843](https://github.com/filecoin-project/lotus/pull/10843))
|
||||||
|
- fix: Eth RPC: do not occlude block param errors. ([filecoin-project/lotus#10534](https://github.com/filecoin-project/lotus/pull/10534))
|
||||||
|
- feat: cli: More ux-friendly batching cmds ([filecoin-project/lotus#10837](https://github.com/filecoin-project/lotus/pull/10837))
|
||||||
|
- fix: cli: Hide legacy markets cmds ([filecoin-project/lotus#10842](https://github.com/filecoin-project/lotus/pull/10842))
|
||||||
|
- feat: chainstore: exit early in MaybeTakeHeavierTipset ([filecoin-project/lotus#10839](https://github.com/filecoin-project/lotus/pull/10839))
|
||||||
|
- fix: itest: fix eth deploy test flake ([filecoin-project/lotus#10829](https://github.com/filecoin-project/lotus/pull/10829))
|
||||||
|
- style: mempool: chain errors using xerrors.Errorf ([filecoin-project/lotus#10836](https://github.com/filecoin-project/lotus/pull/10836))
|
||||||
|
- feat: deflake msgindex_test.go ([filecoin-project/lotus#10826](https://github.com/filecoin-project/lotus/pull/10826))
|
||||||
|
- feat: deflake TestEthFeeHistory ([filecoin-project/lotus#10816](https://github.com/filecoin-project/lotus/pull/10816))
|
||||||
|
- feat: make RunClientTest louder when deals fail ([filecoin-project/lotus#10817](https://github.com/filecoin-project/lotus/pull/10817))
|
||||||
|
- fix: cli: Change arg wording in change-beneficiary cmd ([filecoin-project/lotus#10823](https://github.com/filecoin-project/lotus/pull/10823))
|
||||||
|
- refactor: streamline error handling in CheckPendingMessages (#10818) ([filecoin-project/lotus#10818](https://github.com/filecoin-project/lotus/pull/10818))
|
||||||
|
- feat: Add tmp indices to events table while performing migration to V2
|
||||||
|
|
||||||
# v1.23.2 / 2023-06-28
|
# v1.23.2 / 2023-06-28
|
||||||
|
|
||||||
This is a patch release on top of 1.23.1 containing the fix for https://github.com/filecoin-project/lotus/issues/10906
|
This is a patch release on top of 1.23.1 containing the fix for https://github.com/filecoin-project/lotus/issues/10906
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
#####################################
|
#####################################
|
||||||
FROM golang:1.19.7-buster AS lotus-builder
|
FROM golang:1.19.12-bullseye AS lotus-builder
|
||||||
MAINTAINER Lotus Development Team
|
MAINTAINER Lotus Development Team
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||||
@ -58,7 +58,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
|
|||||||
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
|
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
|
||||||
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
|
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
|
||||||
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
|
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
|
||||||
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
|
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/
|
||||||
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
|
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
|
||||||
|
|
||||||
RUN useradd -r -u 532 -U fc \
|
RUN useradd -r -u 532 -U fc \
|
||||||
|
273
Dockerfile.lotus
273
Dockerfile.lotus
@ -1,273 +0,0 @@
|
|||||||
##### DEPRECATED
|
|
||||||
|
|
||||||
FROM golang:1.19.7-buster AS builder-deps
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
|
||||||
|
|
||||||
ENV XDG_CACHE_HOME="/tmp"
|
|
||||||
|
|
||||||
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
|
|
||||||
ENV RUSTUP_HOME=/usr/local/rustup \
|
|
||||||
CARGO_HOME=/usr/local/cargo \
|
|
||||||
PATH=/usr/local/cargo/bin:$PATH \
|
|
||||||
RUST_VERSION=1.63.0
|
|
||||||
|
|
||||||
RUN set -eux; \
|
|
||||||
dpkgArch="$(dpkg --print-architecture)"; \
|
|
||||||
case "${dpkgArch##*-}" in \
|
|
||||||
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
|
|
||||||
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
|
|
||||||
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
|
|
||||||
esac; \
|
|
||||||
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
|
|
||||||
wget "$url"; \
|
|
||||||
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
|
|
||||||
chmod +x rustup-init; \
|
|
||||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
|
|
||||||
rm rustup-init; \
|
|
||||||
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
|
|
||||||
rustup --version; \
|
|
||||||
cargo --version; \
|
|
||||||
rustc --version;
|
|
||||||
### end rust
|
|
||||||
|
|
||||||
FROM builder-deps AS builder-local
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY ./ /opt/filecoin
|
|
||||||
WORKDIR /opt/filecoin
|
|
||||||
|
|
||||||
### make configurable filecoin-ffi build
|
|
||||||
ARG FFI_BUILD_FROM_SOURCE=0
|
|
||||||
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
|
|
||||||
|
|
||||||
RUN make clean deps
|
|
||||||
|
|
||||||
|
|
||||||
FROM builder-local AS builder-test
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
WORKDIR /opt/filecoin
|
|
||||||
|
|
||||||
RUN make debug
|
|
||||||
|
|
||||||
|
|
||||||
FROM builder-local AS builder
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
WORKDIR /opt/filecoin
|
|
||||||
|
|
||||||
ARG RUSTFLAGS=""
|
|
||||||
ARG GOFLAGS=""
|
|
||||||
|
|
||||||
RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats
|
|
||||||
|
|
||||||
|
|
||||||
FROM ubuntu:20.04 AS base
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
# Base resources
|
|
||||||
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
|
|
||||||
COPY --from=builder /lib/*/libdl.so.2 /lib/
|
|
||||||
COPY --from=builder /lib/*/librt.so.1 /lib/
|
|
||||||
COPY --from=builder /lib/*/libgcc_s.so.1 /lib/
|
|
||||||
COPY --from=builder /lib/*/libutil.so.1 /lib/
|
|
||||||
COPY --from=builder /usr/lib/*/libltdl.so.7 /lib/
|
|
||||||
COPY --from=builder /usr/lib/*/libnuma.so.1 /lib/
|
|
||||||
COPY --from=builder /usr/lib/*/libhwloc.so.5 /lib/
|
|
||||||
COPY --from=builder /usr/lib/*/libOpenCL.so.1 /lib/
|
|
||||||
|
|
||||||
RUN useradd -r -u 532 -U fc \
|
|
||||||
&& mkdir -p /etc/OpenCL/vendors \
|
|
||||||
&& echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
|
|
||||||
|
|
||||||
###
|
|
||||||
FROM base AS lotus
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
|
|
||||||
COPY scripts/docker-lotus-entrypoint.sh /
|
|
||||||
|
|
||||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
|
||||||
ENV LOTUS_PATH /var/lib/lotus
|
|
||||||
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
|
|
||||||
ENV DOCKER_LOTUS_IMPORT_WALLET ""
|
|
||||||
|
|
||||||
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters
|
|
||||||
|
|
||||||
VOLUME /var/lib/lotus
|
|
||||||
VOLUME /var/tmp/filecoin-proof-parameters
|
|
||||||
|
|
||||||
USER fc
|
|
||||||
|
|
||||||
EXPOSE 1234
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
|
|
||||||
|
|
||||||
CMD ["-help"]
|
|
||||||
|
|
||||||
###
|
|
||||||
FROM base AS lotus-wallet
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
|
|
||||||
|
|
||||||
ENV WALLET_PATH /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
RUN mkdir /var/lib/lotus-wallet
|
|
||||||
RUN chown fc: /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
VOLUME /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
USER fc
|
|
||||||
|
|
||||||
EXPOSE 1777
|
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/local/bin/lotus-wallet"]
|
|
||||||
|
|
||||||
CMD ["-help"]
|
|
||||||
|
|
||||||
###
|
|
||||||
FROM base AS lotus-gateway
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
|
|
||||||
|
|
||||||
USER fc
|
|
||||||
|
|
||||||
EXPOSE 1234
|
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/local/bin/lotus-gateway"]
|
|
||||||
|
|
||||||
CMD ["-help"]
|
|
||||||
|
|
||||||
|
|
||||||
###
|
|
||||||
FROM base AS lotus-miner
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
|
|
||||||
COPY scripts/docker-lotus-miner-entrypoint.sh /
|
|
||||||
|
|
||||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
|
||||||
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
|
|
||||||
|
|
||||||
RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
|
|
||||||
|
|
||||||
VOLUME /var/lib/lotus-miner
|
|
||||||
VOLUME /var/tmp/filecoin-proof-parameters
|
|
||||||
|
|
||||||
USER fc
|
|
||||||
|
|
||||||
EXPOSE 2345
|
|
||||||
|
|
||||||
ENTRYPOINT ["/docker-lotus-miner-entrypoint.sh"]
|
|
||||||
|
|
||||||
CMD ["-help"]
|
|
||||||
|
|
||||||
|
|
||||||
###
|
|
||||||
FROM base AS lotus-worker
|
|
||||||
MAINTAINER Lotus Development Team
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
|
|
||||||
|
|
||||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
|
||||||
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
|
|
||||||
|
|
||||||
RUN mkdir /var/lib/lotus-worker
|
|
||||||
RUN chown fc: /var/lib/lotus-worker
|
|
||||||
|
|
||||||
VOLUME /var/lib/lotus-worker
|
|
||||||
|
|
||||||
USER fc
|
|
||||||
|
|
||||||
EXPOSE 3456
|
|
||||||
|
|
||||||
ENTRYPOINT ["/usr/local/bin/lotus-worker"]
|
|
||||||
|
|
||||||
CMD ["-help"]
|
|
||||||
|
|
||||||
|
|
||||||
###
|
|
||||||
from base as lotus-all-in-one
|
|
||||||
|
|
||||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
|
||||||
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
|
|
||||||
ENV LOTUS_PATH /var/lib/lotus
|
|
||||||
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
|
|
||||||
ENV WALLET_PATH /var/lib/lotus-wallet
|
|
||||||
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
|
|
||||||
|
|
||||||
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
|
|
||||||
COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/
|
|
||||||
|
|
||||||
RUN mkdir /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN mkdir /var/lib/lotus
|
|
||||||
RUN mkdir /var/lib/lotus-miner
|
|
||||||
RUN mkdir /var/lib/lotus-worker
|
|
||||||
RUN mkdir /var/lib/lotus-wallet
|
|
||||||
RUN chown fc: /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN chown fc: /var/lib/lotus
|
|
||||||
RUN chown fc: /var/lib/lotus-miner
|
|
||||||
RUN chown fc: /var/lib/lotus-worker
|
|
||||||
RUN chown fc: /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /var/tmp/filecoin-proof-parameters
|
|
||||||
VOLUME /var/lib/lotus
|
|
||||||
VOLUME /var/lib/lotus-miner
|
|
||||||
VOLUME /var/lib/lotus-worker
|
|
||||||
VOLUME /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
EXPOSE 1234
|
|
||||||
EXPOSE 2345
|
|
||||||
EXPOSE 3456
|
|
||||||
EXPOSE 1777
|
|
||||||
|
|
||||||
###
|
|
||||||
from base as lotus-test
|
|
||||||
|
|
||||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
|
||||||
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
|
|
||||||
ENV LOTUS_PATH /var/lib/lotus
|
|
||||||
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
|
|
||||||
ENV WALLET_PATH /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
COPY --from=builder-test /opt/filecoin/lotus /usr/local/bin/
|
|
||||||
COPY --from=builder-test /opt/filecoin/lotus-miner /usr/local/bin/
|
|
||||||
COPY --from=builder-test /opt/filecoin/lotus-worker /usr/local/bin/
|
|
||||||
COPY --from=builder-test /opt/filecoin/lotus-seed /usr/local/bin/
|
|
||||||
|
|
||||||
RUN mkdir /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN mkdir /var/lib/lotus
|
|
||||||
RUN mkdir /var/lib/lotus-miner
|
|
||||||
RUN mkdir /var/lib/lotus-worker
|
|
||||||
RUN mkdir /var/lib/lotus-wallet
|
|
||||||
RUN chown fc: /var/tmp/filecoin-proof-parameters
|
|
||||||
RUN chown fc: /var/lib/lotus
|
|
||||||
RUN chown fc: /var/lib/lotus-miner
|
|
||||||
RUN chown fc: /var/lib/lotus-worker
|
|
||||||
RUN chown fc: /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
|
|
||||||
VOLUME /var/tmp/filecoin-proof-parameters
|
|
||||||
VOLUME /var/lib/lotus
|
|
||||||
VOLUME /var/lib/lotus-miner
|
|
||||||
VOLUME /var/lib/lotus-worker
|
|
||||||
VOLUME /var/lib/lotus-wallet
|
|
||||||
|
|
||||||
EXPOSE 1234
|
|
||||||
EXPOSE 2345
|
|
||||||
EXPOSE 3456
|
|
||||||
EXPOSE 1777
|
|
||||||
|
|
@ -1 +1 @@
|
|||||||
1.19.7
|
1.19.12
|
||||||
|
2
Makefile
2
Makefile
@ -355,7 +355,7 @@ fiximports:
|
|||||||
./scripts/fiximports
|
./scripts/fiximports
|
||||||
|
|
||||||
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
|
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
|
||||||
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
|
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
|
||||||
.PHONY: gen
|
.PHONY: gen
|
||||||
|
|
||||||
jen: gen
|
jen: gen
|
||||||
|
@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l
|
|||||||
|
|
||||||
#### Go
|
#### Go
|
||||||
|
|
||||||
To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/):
|
To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
||||||
```
|
```
|
||||||
|
|
||||||
**TIP:**
|
**TIP:**
|
||||||
|
@ -796,31 +796,32 @@ type FullNode interface {
|
|||||||
// EthGetBlockTransactionCountByHash returns the number of messages in the TipSet
|
// EthGetBlockTransactionCountByHash returns the number of messages in the TipSet
|
||||||
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) //perm:read
|
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) //perm:read
|
||||||
|
|
||||||
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
||||||
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
||||||
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
|
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
|
||||||
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
|
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
|
||||||
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
|
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
|
||||||
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
|
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
|
||||||
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read
|
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) //perm:read
|
||||||
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
|
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
|
||||||
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
|
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
|
||||||
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
||||||
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
||||||
|
|
||||||
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) //perm:read
|
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
|
||||||
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) //perm:read
|
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
|
||||||
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) //perm:read
|
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) //perm:read
|
||||||
EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
||||||
NetVersion(ctx context.Context) (string, error) //perm:read
|
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) //perm:read
|
||||||
NetListening(ctx context.Context) (bool, error) //perm:read
|
NetVersion(ctx context.Context) (string, error) //perm:read
|
||||||
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
NetListening(ctx context.Context) (bool, error) //perm:read
|
||||||
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
||||||
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read
|
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
||||||
|
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read
|
||||||
|
|
||||||
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
||||||
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read
|
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read
|
||||||
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error) //perm:read
|
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
|
||||||
|
|
||||||
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
|
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
|
||||||
|
|
||||||
@ -829,23 +830,23 @@ type FullNode interface {
|
|||||||
|
|
||||||
// Polling method for a filter, returns event logs which occurred since last poll.
|
// Polling method for a filter, returns event logs which occurred since last poll.
|
||||||
// (requires write perm since timestamp of last filter execution will be written)
|
// (requires write perm since timestamp of last filter execution will be written)
|
||||||
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
|
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
|
||||||
|
|
||||||
// Returns event logs matching filter with given id.
|
// Returns event logs matching filter with given id.
|
||||||
// (requires write perm since timestamp of last filter execution will be written)
|
// (requires write perm since timestamp of last filter execution will be written)
|
||||||
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
|
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
|
||||||
|
|
||||||
// Installs a persistent filter based on given filter spec.
|
// Installs a persistent filter based on given filter spec.
|
||||||
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:write
|
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:read
|
||||||
|
|
||||||
// Installs a persistent filter to notify when a new block arrives.
|
// Installs a persistent filter to notify when a new block arrives.
|
||||||
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
|
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
|
||||||
|
|
||||||
// Installs a persistent filter to notify when new messages arrive in the message pool.
|
// Installs a persistent filter to notify when new messages arrive in the message pool.
|
||||||
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
|
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
|
||||||
|
|
||||||
// Uninstalls a filter with given id.
|
// Uninstalls a filter with given id.
|
||||||
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:write
|
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:read
|
||||||
|
|
||||||
// Subscribe to different event types using websockets
|
// Subscribe to different event types using websockets
|
||||||
// eventTypes is one or more of:
|
// eventTypes is one or more of:
|
||||||
@ -854,10 +855,10 @@ type FullNode interface {
|
|||||||
// - logs: notify new event logs that match a criteria
|
// - logs: notify new event logs that match a criteria
|
||||||
// params contains additional parameters used with the log event type
|
// params contains additional parameters used with the log event type
|
||||||
// The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
|
// The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
|
||||||
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:write
|
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:read
|
||||||
|
|
||||||
// Unsubscribe from a websocket subscription
|
// Unsubscribe from a websocket subscription
|
||||||
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:write
|
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:read
|
||||||
|
|
||||||
// Returns the client version
|
// Returns the client version
|
||||||
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
|
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
|
||||||
|
@ -33,6 +33,9 @@ import (
|
|||||||
// * Generate openrpc blobs
|
// * Generate openrpc blobs
|
||||||
|
|
||||||
type Gateway interface {
|
type Gateway interface {
|
||||||
|
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
|
||||||
|
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
|
||||||
|
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error)
|
||||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
|
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
|
||||||
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
||||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
|
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
|
||||||
@ -91,13 +94,14 @@ type Gateway interface {
|
|||||||
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
|
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
|
||||||
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
|
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
|
||||||
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
|
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
|
||||||
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
|
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error)
|
||||||
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
|
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
|
||||||
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
|
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
|
||||||
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
|
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
|
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
|
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error)
|
||||||
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
|
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
|
||||||
|
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error)
|
||||||
NetVersion(ctx context.Context) (string, error)
|
NetVersion(ctx context.Context) (string, error)
|
||||||
NetListening(ctx context.Context) (bool, error)
|
NetListening(ctx context.Context) (bool, error)
|
||||||
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
|
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
|
||||||
@ -105,7 +109,7 @@ type Gateway interface {
|
|||||||
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
|
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
|
||||||
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
|
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
|
||||||
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
|
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
|
||||||
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error)
|
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
|
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
|
||||||
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
|
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
|
||||||
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
||||||
|
@ -86,6 +86,7 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ExampleValues[reflect.TypeOf(addr)] = addr
|
ExampleValues[reflect.TypeOf(addr)] = addr
|
||||||
|
ExampleValues[reflect.TypeOf(&addr)] = &addr
|
||||||
|
|
||||||
pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
|
pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -479,6 +480,9 @@ func ExampleValue(method string, t, parent reflect.Type) interface{} {
|
|||||||
es := exampleStruct(method, t.Elem(), t)
|
es := exampleStruct(method, t.Elem(), t)
|
||||||
ExampleValues[t] = es
|
ExampleValues[t] = es
|
||||||
return es
|
return es
|
||||||
|
} else if t.Elem().Kind() == reflect.String {
|
||||||
|
str := "string value"
|
||||||
|
return &str
|
||||||
}
|
}
|
||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
return struct{}{}
|
return struct{}{}
|
||||||
|
@ -21,6 +21,7 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
|
|||||||
as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt")
|
as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt")
|
||||||
as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance")
|
as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance")
|
||||||
as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
|
as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
|
||||||
|
as.AliasMethod("eth_syncing", "Filecoin.EthSyncing")
|
||||||
as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory")
|
as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory")
|
||||||
as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion")
|
as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion")
|
||||||
as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas")
|
as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas")
|
||||||
|
@ -1012,7 +1012,7 @@ func (mr *MockFullNodeMockRecorder) EthBlockNumber(arg0 interface{}) *gomock.Cal
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EthCall mocks base method.
|
// EthCall mocks base method.
|
||||||
func (m *MockFullNode) EthCall(arg0 context.Context, arg1 ethtypes.EthCall, arg2 string) (ethtypes.EthBytes, error) {
|
func (m *MockFullNode) EthCall(arg0 context.Context, arg1 ethtypes.EthCall, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "EthCall", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "EthCall", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||||
@ -1087,7 +1087,7 @@ func (mr *MockFullNodeMockRecorder) EthGasPrice(arg0 interface{}) *gomock.Call {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EthGetBalance mocks base method.
|
// EthGetBalance mocks base method.
|
||||||
func (m *MockFullNode) EthGetBalance(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBigInt, error) {
|
func (m *MockFullNode) EthGetBalance(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "EthGetBalance", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "EthGetBalance", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(ethtypes.EthBigInt)
|
ret0, _ := ret[0].(ethtypes.EthBigInt)
|
||||||
@ -1162,7 +1162,7 @@ func (mr *MockFullNodeMockRecorder) EthGetBlockTransactionCountByNumber(arg0, ar
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EthGetCode mocks base method.
|
// EthGetCode mocks base method.
|
||||||
func (m *MockFullNode) EthGetCode(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBytes, error) {
|
func (m *MockFullNode) EthGetCode(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "EthGetCode", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "EthGetCode", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||||
@ -1237,7 +1237,7 @@ func (mr *MockFullNodeMockRecorder) EthGetMessageCidByTransactionHash(arg0, arg1
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EthGetStorageAt mocks base method.
|
// EthGetStorageAt mocks base method.
|
||||||
func (m *MockFullNode) EthGetStorageAt(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBytes, arg3 string) (ethtypes.EthBytes, error) {
|
func (m *MockFullNode) EthGetStorageAt(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBytes, arg3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "EthGetStorageAt", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "EthGetStorageAt", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||||
@ -1312,7 +1312,7 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, a
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EthGetTransactionCount mocks base method.
|
// EthGetTransactionCount mocks base method.
|
||||||
func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) {
|
func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "EthGetTransactionCount", arg0, arg1, arg2)
|
ret := m.ctrl.Call(m, "EthGetTransactionCount", arg0, arg1, arg2)
|
||||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||||
@ -1476,6 +1476,21 @@ func (mr *MockFullNodeMockRecorder) EthSubscribe(arg0, arg1 interface{}) *gomock
|
|||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSubscribe", reflect.TypeOf((*MockFullNode)(nil).EthSubscribe), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSubscribe", reflect.TypeOf((*MockFullNode)(nil).EthSubscribe), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// EthSyncing mocks base method.
|
||||||
|
func (m *MockFullNode) EthSyncing(arg0 context.Context) (ethtypes.EthSyncingResult, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "EthSyncing", arg0)
|
||||||
|
ret0, _ := ret[0].(ethtypes.EthSyncingResult)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// EthSyncing indicates an expected call of EthSyncing.
|
||||||
|
func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
// EthUninstallFilter mocks base method.
|
// EthUninstallFilter mocks base method.
|
||||||
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
|
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
|
141
api/proxy_gen.go
141
api/proxy_gen.go
@ -252,7 +252,7 @@ type FullNodeMethods struct {
|
|||||||
|
|
||||||
EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
||||||
|
|
||||||
EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) `perm:"read"`
|
EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"`
|
||||||
|
|
||||||
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
||||||
|
|
||||||
@ -262,7 +262,7 @@ type FullNodeMethods struct {
|
|||||||
|
|
||||||
EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
|
EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) `perm:"read"`
|
EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) `perm:"read"`
|
EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) `perm:"read"`
|
||||||
|
|
||||||
@ -272,17 +272,17 @@ type FullNodeMethods struct {
|
|||||||
|
|
||||||
EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) `perm:"read"`
|
EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) `perm:"read"`
|
EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"`
|
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"`
|
EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetLogs func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) `perm:"read"`
|
EthGetLogs func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) `perm:"read"`
|
EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) `perm:"read"`
|
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) `perm:"read"`
|
EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) `perm:"read"`
|
||||||
|
|
||||||
@ -292,7 +292,7 @@ type FullNodeMethods struct {
|
|||||||
|
|
||||||
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"`
|
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `perm:"read"`
|
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) `perm:"read"`
|
||||||
|
|
||||||
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"`
|
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"`
|
||||||
|
|
||||||
@ -302,21 +302,23 @@ type FullNodeMethods struct {
|
|||||||
|
|
||||||
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
|
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
|
||||||
|
|
||||||
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
|
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"`
|
||||||
|
|
||||||
EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"write"`
|
EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"read"`
|
||||||
|
|
||||||
EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
|
EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"`
|
||||||
|
|
||||||
EthProtocolVersion func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
EthProtocolVersion func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
|
||||||
|
|
||||||
EthSendRawTransaction func(p0 context.Context, p1 ethtypes.EthBytes) (ethtypes.EthHash, error) `perm:"read"`
|
EthSendRawTransaction func(p0 context.Context, p1 ethtypes.EthBytes) (ethtypes.EthHash, error) `perm:"read"`
|
||||||
|
|
||||||
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"write"`
|
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"read"`
|
||||||
|
|
||||||
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"write"`
|
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
|
||||||
|
|
||||||
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"write"`
|
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
|
||||||
|
|
||||||
|
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
|
||||||
|
|
||||||
FilecoinAddressToEthAddress func(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) `perm:"read"`
|
FilecoinAddressToEthAddress func(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) `perm:"read"`
|
||||||
|
|
||||||
@ -630,6 +632,8 @@ type GatewayStruct struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type GatewayMethods struct {
|
type GatewayMethods struct {
|
||||||
|
ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) ``
|
||||||
|
|
||||||
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
|
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
|
||||||
|
|
||||||
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) ``
|
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) ``
|
||||||
@ -664,7 +668,7 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) ``
|
EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) ``
|
||||||
|
|
||||||
EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) ``
|
EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) ``
|
||||||
|
|
||||||
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) ``
|
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) ``
|
||||||
|
|
||||||
@ -674,7 +678,7 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) ``
|
EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) ``
|
||||||
|
|
||||||
EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) ``
|
EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) ``
|
||||||
|
|
||||||
EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) ``
|
EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) ``
|
||||||
|
|
||||||
@ -684,7 +688,7 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) ``
|
EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) ``
|
||||||
|
|
||||||
EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) ``
|
EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) ``
|
||||||
|
|
||||||
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) ``
|
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) ``
|
||||||
|
|
||||||
@ -694,13 +698,13 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) ``
|
EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) ``
|
||||||
|
|
||||||
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) ``
|
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) ``
|
||||||
|
|
||||||
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) ``
|
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) ``
|
||||||
|
|
||||||
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) ``
|
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) ``
|
||||||
|
|
||||||
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) ``
|
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) ``
|
||||||
|
|
||||||
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) ``
|
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) ``
|
||||||
|
|
||||||
@ -722,6 +726,8 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) ``
|
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) ``
|
||||||
|
|
||||||
|
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
|
||||||
|
|
||||||
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
|
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
|
||||||
|
|
||||||
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
|
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
|
||||||
@ -730,8 +736,12 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
|
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
|
||||||
|
|
||||||
|
MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) ``
|
||||||
|
|
||||||
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
|
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
|
||||||
|
|
||||||
|
MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) ``
|
||||||
|
|
||||||
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
|
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
|
||||||
|
|
||||||
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
|
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
|
||||||
@ -2081,14 +2091,14 @@ func (s *FullNodeStub) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, e
|
|||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthCall == nil {
|
if s.Internal.EthCall == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthCall(p0, p1, p2)
|
return s.Internal.EthCall(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2136,14 +2146,14 @@ func (s *FullNodeStub) EthGasPrice(p0 context.Context) (ethtypes.EthBigInt, erro
|
|||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) {
|
func (s *FullNodeStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) {
|
||||||
if s.Internal.EthGetBalance == nil {
|
if s.Internal.EthGetBalance == nil {
|
||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetBalance(p0, p1, p2)
|
return s.Internal.EthGetBalance(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) {
|
func (s *FullNodeStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) {
|
||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2191,14 +2201,14 @@ func (s *FullNodeStub) EthGetBlockTransactionCountByNumber(p0 context.Context, p
|
|||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthGetCode == nil {
|
if s.Internal.EthGetCode == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetCode(p0, p1, p2)
|
return s.Internal.EthGetCode(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2246,14 +2256,14 @@ func (s *FullNodeStub) EthGetMessageCidByTransactionHash(p0 context.Context, p1
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthGetStorageAt == nil {
|
if s.Internal.EthGetStorageAt == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetStorageAt(p0, p1, p2, p3)
|
return s.Internal.EthGetStorageAt(p0, p1, p2, p3)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) {
|
func (s *FullNodeStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2301,14 +2311,14 @@ func (s *FullNodeStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *et
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
|
func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) {
|
||||||
if s.Internal.EthGetTransactionCount == nil {
|
if s.Internal.EthGetTransactionCount == nil {
|
||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetTransactionCount(p0, p1, p2)
|
return s.Internal.EthGetTransactionCount(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FullNodeStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
|
func (s *FullNodeStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) {
|
||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2422,6 +2432,17 @@ func (s *FullNodeStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (e
|
|||||||
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
|
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *FullNodeStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
|
||||||
|
if s.Internal.EthSyncing == nil {
|
||||||
|
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.EthSyncing(p0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
|
||||||
|
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
||||||
if s.Internal.EthUninstallFilter == nil {
|
if s.Internal.EthUninstallFilter == nil {
|
||||||
return false, ErrNotSupported
|
return false, ErrNotSupported
|
||||||
@ -4094,6 +4115,17 @@ func (s *FullNodeStub) Web3ClientVersion(p0 context.Context) (string, error) {
|
|||||||
return "", ErrNotSupported
|
return "", ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
|
||||||
|
if s.Internal.ChainGetBlock == nil {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.ChainGetBlock(p0, p1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
|
func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) {
|
||||||
if s.Internal.ChainGetBlockMessages == nil {
|
if s.Internal.ChainGetBlockMessages == nil {
|
||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
@ -4281,14 +4313,14 @@ func (s *GatewayStub) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, er
|
|||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthCall == nil {
|
if s.Internal.EthCall == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthCall(p0, p1, p2)
|
return s.Internal.EthCall(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4336,14 +4368,14 @@ func (s *GatewayStub) EthGasPrice(p0 context.Context) (ethtypes.EthBigInt, error
|
|||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) {
|
func (s *GatewayStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) {
|
||||||
if s.Internal.EthGetBalance == nil {
|
if s.Internal.EthGetBalance == nil {
|
||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetBalance(p0, p1, p2)
|
return s.Internal.EthGetBalance(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) {
|
func (s *GatewayStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) {
|
||||||
return *new(ethtypes.EthBigInt), ErrNotSupported
|
return *new(ethtypes.EthBigInt), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4391,14 +4423,14 @@ func (s *GatewayStub) EthGetBlockTransactionCountByNumber(p0 context.Context, p1
|
|||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthGetCode == nil {
|
if s.Internal.EthGetCode == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetCode(p0, p1, p2)
|
return s.Internal.EthGetCode(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4446,14 +4478,14 @@ func (s *GatewayStub) EthGetMessageCidByTransactionHash(p0 context.Context, p1 *
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
if s.Internal.EthGetStorageAt == nil {
|
if s.Internal.EthGetStorageAt == nil {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetStorageAt(p0, p1, p2, p3)
|
return s.Internal.EthGetStorageAt(p0, p1, p2, p3)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) {
|
func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
|
||||||
return *new(ethtypes.EthBytes), ErrNotSupported
|
return *new(ethtypes.EthBytes), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4479,14 +4511,14 @@ func (s *GatewayStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *eth
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
|
func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) {
|
||||||
if s.Internal.EthGetTransactionCount == nil {
|
if s.Internal.EthGetTransactionCount == nil {
|
||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
return s.Internal.EthGetTransactionCount(p0, p1, p2)
|
return s.Internal.EthGetTransactionCount(p0, p1, p2)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *GatewayStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
|
func (s *GatewayStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) {
|
||||||
return *new(ethtypes.EthUint64), ErrNotSupported
|
return *new(ethtypes.EthUint64), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4600,6 +4632,17 @@ func (s *GatewayStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (et
|
|||||||
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
|
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
|
||||||
|
if s.Internal.EthSyncing == nil {
|
||||||
|
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.EthSyncing(p0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
|
||||||
|
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
||||||
if s.Internal.EthUninstallFilter == nil {
|
if s.Internal.EthUninstallFilter == nil {
|
||||||
return false, ErrNotSupported
|
return false, ErrNotSupported
|
||||||
@ -4644,6 +4687,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) {
|
||||||
|
if s.Internal.MinerGetBaseInfo == nil {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
||||||
if s.Internal.MpoolGetNonce == nil {
|
if s.Internal.MpoolGetNonce == nil {
|
||||||
return 0, ErrNotSupported
|
return 0, ErrNotSupported
|
||||||
@ -4655,6 +4709,17 @@ func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uin
|
|||||||
return 0, ErrNotSupported
|
return 0, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
|
||||||
|
if s.Internal.MpoolPending == nil {
|
||||||
|
return *new([]*types.SignedMessage), ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.MpoolPending(p0, p1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
|
||||||
|
return *new([]*types.SignedMessage), ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
|
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
|
||||||
if s.Internal.MpoolPush == nil {
|
if s.Internal.MpoolPush == nil {
|
||||||
return *new(cid.Cid), ErrNotSupported
|
return *new(cid.Cid), ErrNotSupported
|
||||||
|
50
api/types.go
50
api/types.go
@ -299,6 +299,7 @@ type MinerInfo struct {
|
|||||||
SectorSize abi.SectorSize
|
SectorSize abi.SectorSize
|
||||||
WindowPoStPartitionSectors uint64
|
WindowPoStPartitionSectors uint64
|
||||||
ConsensusFaultElapsed abi.ChainEpoch
|
ConsensusFaultElapsed abi.ChainEpoch
|
||||||
|
PendingOwnerAddress *address.Address
|
||||||
Beneficiary address.Address
|
Beneficiary address.Address
|
||||||
BeneficiaryTerm *miner.BeneficiaryTerm
|
BeneficiaryTerm *miner.BeneficiaryTerm
|
||||||
PendingBeneficiaryTerm *miner.PendingBeneficiaryChange
|
PendingBeneficiaryTerm *miner.PendingBeneficiaryChange
|
||||||
@ -314,31 +315,30 @@ type NetworkParams struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type ForkUpgradeParams struct {
|
type ForkUpgradeParams struct {
|
||||||
UpgradeSmokeHeight abi.ChainEpoch
|
UpgradeSmokeHeight abi.ChainEpoch
|
||||||
UpgradeBreezeHeight abi.ChainEpoch
|
UpgradeBreezeHeight abi.ChainEpoch
|
||||||
UpgradeIgnitionHeight abi.ChainEpoch
|
UpgradeIgnitionHeight abi.ChainEpoch
|
||||||
UpgradeLiftoffHeight abi.ChainEpoch
|
UpgradeLiftoffHeight abi.ChainEpoch
|
||||||
UpgradeAssemblyHeight abi.ChainEpoch
|
UpgradeAssemblyHeight abi.ChainEpoch
|
||||||
UpgradeRefuelHeight abi.ChainEpoch
|
UpgradeRefuelHeight abi.ChainEpoch
|
||||||
UpgradeTapeHeight abi.ChainEpoch
|
UpgradeTapeHeight abi.ChainEpoch
|
||||||
UpgradeKumquatHeight abi.ChainEpoch
|
UpgradeKumquatHeight abi.ChainEpoch
|
||||||
UpgradePriceListOopsHeight abi.ChainEpoch
|
BreezeGasTampingDuration abi.ChainEpoch
|
||||||
BreezeGasTampingDuration abi.ChainEpoch
|
UpgradeCalicoHeight abi.ChainEpoch
|
||||||
UpgradeCalicoHeight abi.ChainEpoch
|
UpgradePersianHeight abi.ChainEpoch
|
||||||
UpgradePersianHeight abi.ChainEpoch
|
UpgradeOrangeHeight abi.ChainEpoch
|
||||||
UpgradeOrangeHeight abi.ChainEpoch
|
UpgradeClausHeight abi.ChainEpoch
|
||||||
UpgradeClausHeight abi.ChainEpoch
|
UpgradeTrustHeight abi.ChainEpoch
|
||||||
UpgradeTrustHeight abi.ChainEpoch
|
UpgradeNorwegianHeight abi.ChainEpoch
|
||||||
UpgradeNorwegianHeight abi.ChainEpoch
|
UpgradeTurboHeight abi.ChainEpoch
|
||||||
UpgradeTurboHeight abi.ChainEpoch
|
UpgradeHyperdriveHeight abi.ChainEpoch
|
||||||
UpgradeHyperdriveHeight abi.ChainEpoch
|
UpgradeChocolateHeight abi.ChainEpoch
|
||||||
UpgradeChocolateHeight abi.ChainEpoch
|
UpgradeOhSnapHeight abi.ChainEpoch
|
||||||
UpgradeOhSnapHeight abi.ChainEpoch
|
UpgradeSkyrHeight abi.ChainEpoch
|
||||||
UpgradeSkyrHeight abi.ChainEpoch
|
UpgradeSharkHeight abi.ChainEpoch
|
||||||
UpgradeSharkHeight abi.ChainEpoch
|
UpgradeHyggeHeight abi.ChainEpoch
|
||||||
UpgradeHyggeHeight abi.ChainEpoch
|
UpgradeLightningHeight abi.ChainEpoch
|
||||||
UpgradeLightningHeight abi.ChainEpoch
|
UpgradeThunderHeight abi.ChainEpoch
|
||||||
UpgradeThunderHeight abi.ChainEpoch
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type NonceMapType map[address.Address]uint64
|
type NonceMapType map[address.Address]uint64
|
||||||
|
@ -35,6 +35,9 @@ import (
|
|||||||
// * Generate openrpc blobs
|
// * Generate openrpc blobs
|
||||||
|
|
||||||
type Gateway interface {
|
type Gateway interface {
|
||||||
|
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
|
||||||
|
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
|
||||||
|
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error)
|
||||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
|
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
|
||||||
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
||||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
|
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
|
||||||
|
@ -431,6 +431,8 @@ type GatewayStruct struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type GatewayMethods struct {
|
type GatewayMethods struct {
|
||||||
|
ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) ``
|
||||||
|
|
||||||
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) ``
|
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) ``
|
||||||
|
|
||||||
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
|
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
|
||||||
@ -453,8 +455,12 @@ type GatewayMethods struct {
|
|||||||
|
|
||||||
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
|
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
|
||||||
|
|
||||||
|
MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) ``
|
||||||
|
|
||||||
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
|
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
|
||||||
|
|
||||||
|
MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) ``
|
||||||
|
|
||||||
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
|
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
|
||||||
|
|
||||||
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
|
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
|
||||||
@ -2581,6 +2587,17 @@ func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 [
|
|||||||
return false, ErrNotSupported
|
return false, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
|
||||||
|
if s.Internal.ChainGetBlock == nil {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.ChainGetBlock(p0, p1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
|
func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) {
|
||||||
if s.Internal.ChainGetBlockMessages == nil {
|
if s.Internal.ChainGetBlockMessages == nil {
|
||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
@ -2702,6 +2719,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
|
|||||||
return nil, ErrNotSupported
|
return nil, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) {
|
||||||
|
if s.Internal.MinerGetBaseInfo == nil {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) {
|
||||||
|
return nil, ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
||||||
if s.Internal.MpoolGetNonce == nil {
|
if s.Internal.MpoolGetNonce == nil {
|
||||||
return 0, ErrNotSupported
|
return 0, ErrNotSupported
|
||||||
@ -2713,6 +2741,17 @@ func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uin
|
|||||||
return 0, ErrNotSupported
|
return 0, ErrNotSupported
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
|
||||||
|
if s.Internal.MpoolPending == nil {
|
||||||
|
return *new([]*types.SignedMessage), ErrNotSupported
|
||||||
|
}
|
||||||
|
return s.Internal.MpoolPending(p0, p1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *GatewayStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) {
|
||||||
|
return *new([]*types.SignedMessage), ErrNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
|
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
|
||||||
if s.Internal.MpoolPush == nil {
|
if s.Internal.MpoolPush == nil {
|
||||||
return *new(cid.Cid), ErrNotSupported
|
return *new(cid.Cid), ErrNotSupported
|
||||||
|
@ -9,9 +9,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
u "github.com/ipfs/boxo/util"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
u "github.com/ipfs/go-ipfs-util"
|
|
||||||
ipld "github.com/ipfs/go-ipld-format"
|
ipld "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
@ -4,9 +4,9 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
blockstore "github.com/ipfs/boxo/blockstore"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
ds "github.com/ipfs/go-datastore"
|
ds "github.com/ipfs/go-datastore"
|
||||||
blockstore "github.com/ipfs/go-ipfs-blockstore"
|
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -5,15 +5,16 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
|
iface "github.com/ipfs/boxo/coreiface"
|
||||||
|
"github.com/ipfs/boxo/coreiface/options"
|
||||||
|
"github.com/ipfs/boxo/coreiface/path"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
httpapi "github.com/ipfs/go-ipfs-http-client"
|
|
||||||
iface "github.com/ipfs/interface-go-ipfs-core"
|
|
||||||
"github.com/ipfs/interface-go-ipfs-core/options"
|
|
||||||
"github.com/ipfs/interface-go-ipfs-core/path"
|
|
||||||
"github.com/multiformats/go-multiaddr"
|
"github.com/multiformats/go-multiaddr"
|
||||||
"github.com/multiformats/go-multihash"
|
"github.com/multiformats/go-multihash"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
rpc "github.com/filecoin-project/kubo-api-client"
|
||||||
)
|
)
|
||||||
|
|
||||||
type IPFSBlockstore struct {
|
type IPFSBlockstore struct {
|
||||||
@ -24,7 +25,7 @@ type IPFSBlockstore struct {
|
|||||||
var _ BasicBlockstore = (*IPFSBlockstore)(nil)
|
var _ BasicBlockstore = (*IPFSBlockstore)(nil)
|
||||||
|
|
||||||
func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
|
func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
|
||||||
localApi, err := httpapi.NewLocalApi()
|
localApi, err := rpc.NewLocalApi()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("getting local ipfs api: %w", err)
|
return nil, xerrors.Errorf("getting local ipfs api: %w", err)
|
||||||
}
|
}
|
||||||
@ -51,7 +52,7 @@ func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, e
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
|
func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
|
||||||
httpApi, err := httpapi.NewApi(maddr)
|
httpApi, err := rpc.NewApi(maddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
|
return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -164,7 +164,7 @@ type SplitStore struct {
|
|||||||
path string
|
path string
|
||||||
|
|
||||||
mx sync.Mutex
|
mx sync.Mutex
|
||||||
warmupEpoch abi.ChainEpoch // protected by mx
|
warmupEpoch atomic.Int64
|
||||||
baseEpoch abi.ChainEpoch // protected by compaction lock
|
baseEpoch abi.ChainEpoch // protected by compaction lock
|
||||||
pruneEpoch abi.ChainEpoch // protected by compaction lock
|
pruneEpoch abi.ChainEpoch // protected by compaction lock
|
||||||
|
|
||||||
@ -684,9 +684,7 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *SplitStore) isWarm() bool {
|
func (s *SplitStore) isWarm() bool {
|
||||||
s.mx.Lock()
|
return s.warmupEpoch.Load() > 0
|
||||||
defer s.mx.Unlock()
|
|
||||||
return s.warmupEpoch > 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// State tracking
|
// State tracking
|
||||||
@ -757,7 +755,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
|
|||||||
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
s.warmupEpoch = bytesToEpoch(bs)
|
s.warmupEpoch.Store(bytesToInt64(bs))
|
||||||
|
|
||||||
case dstore.ErrNotFound:
|
case dstore.ErrNotFound:
|
||||||
warmup = true
|
warmup = true
|
||||||
@ -791,7 +789,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
|
|||||||
return xerrors.Errorf("error loading compaction index: %w", err)
|
return xerrors.Errorf("error loading compaction index: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)
|
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch.Load())
|
||||||
|
|
||||||
if warmup {
|
if warmup {
|
||||||
err = s.warmup(curTs)
|
err = s.warmup(curTs)
|
||||||
|
@ -145,7 +145,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
|||||||
func (s *SplitStore) Info() map[string]interface{} {
|
func (s *SplitStore) Info() map[string]interface{} {
|
||||||
info := make(map[string]interface{})
|
info := make(map[string]interface{})
|
||||||
info["base epoch"] = s.baseEpoch
|
info["base epoch"] = s.baseEpoch
|
||||||
info["warmup epoch"] = s.warmupEpoch
|
info["warmup epoch"] = s.warmupEpoch.Load()
|
||||||
info["compactions"] = s.compactionIndex
|
info["compactions"] = s.compactionIndex
|
||||||
info["prunes"] = s.pruneIndex
|
info["prunes"] = s.pruneIndex
|
||||||
info["compacting"] = s.compacting == 1
|
info["compacting"] = s.compacting == 1
|
||||||
|
@ -1114,13 +1114,17 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
|||||||
if err := walkBlock(c); err != nil {
|
if err := walkBlock(c); err != nil {
|
||||||
return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
|
return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := s.checkYield(); err != nil {
|
||||||
|
return xerrors.Errorf("check yield: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := g.Wait(); err != nil {
|
if err := g.Wait(); err != nil {
|
||||||
return err
|
return xerrors.Errorf("walkBlock workers errored: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1153,8 +1157,8 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check this before recursing
|
// check this before recursing
|
||||||
if err := s.checkYield(); err != nil {
|
if err := s.checkClosing(); err != nil {
|
||||||
return 0, err
|
return 0, xerrors.Errorf("check closing: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var links []cid.Cid
|
var links []cid.Cid
|
||||||
@ -1222,8 +1226,8 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m
|
|||||||
}
|
}
|
||||||
|
|
||||||
// check this before recursing
|
// check this before recursing
|
||||||
if err := s.checkYield(); err != nil {
|
if err := s.checkClosing(); err != nil {
|
||||||
return sz, err
|
return sz, xerrors.Errorf("check closing: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var links []cid.Cid
|
var links []cid.Cid
|
||||||
|
@ -429,7 +429,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
|
|||||||
}
|
}
|
||||||
defer ss.Close() //nolint
|
defer ss.Close() //nolint
|
||||||
|
|
||||||
ss.warmupEpoch = 1
|
ss.warmupEpoch.Store(1)
|
||||||
go ss.reifyOrchestrator()
|
go ss.reifyOrchestrator()
|
||||||
|
|
||||||
waitForReification := func() {
|
waitForReification := func() {
|
||||||
@ -529,7 +529,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
|
|||||||
}
|
}
|
||||||
defer ss.Close() //nolint
|
defer ss.Close() //nolint
|
||||||
|
|
||||||
ss.warmupEpoch = 1
|
ss.warmupEpoch.Store(1)
|
||||||
go ss.reifyOrchestrator()
|
go ss.reifyOrchestrator()
|
||||||
|
|
||||||
waitForReification := func() {
|
waitForReification := func() {
|
||||||
|
@ -136,9 +136,8 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("error saving warm up epoch: %w", err)
|
return xerrors.Errorf("error saving warm up epoch: %w", err)
|
||||||
}
|
}
|
||||||
s.mx.Lock()
|
|
||||||
s.warmupEpoch = epoch
|
s.warmupEpoch.Store(int64(epoch))
|
||||||
s.mx.Unlock()
|
|
||||||
|
|
||||||
// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
|
// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
|
||||||
err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex))
|
err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex))
|
||||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -49,16 +49,11 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
|
|||||||
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
|
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
|
||||||
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
|
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
|
||||||
var UpgradeSkyrHeight = abi.ChainEpoch(-19)
|
var UpgradeSkyrHeight = abi.ChainEpoch(-19)
|
||||||
|
var UpgradeSharkHeight = abi.ChainEpoch(-20)
|
||||||
|
var UpgradeHyggeHeight = abi.ChainEpoch(-21)
|
||||||
|
var UpgradeLightningHeight = abi.ChainEpoch(-22)
|
||||||
|
|
||||||
const UpgradeSharkHeight = abi.ChainEpoch(-20)
|
const UpgradeThunderHeight = 50
|
||||||
|
|
||||||
const UpgradeHyggeHeight = abi.ChainEpoch(100)
|
|
||||||
|
|
||||||
// ??????????
|
|
||||||
const UpgradeLightningHeight = 200
|
|
||||||
|
|
||||||
// ??????????????????
|
|
||||||
const UpgradeThunderHeight = 300
|
|
||||||
|
|
||||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||||
0: DrandMainnet,
|
0: DrandMainnet,
|
||||||
|
@ -37,7 +37,7 @@ func BuildTypeString() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// BuildVersion is the local build version
|
// BuildVersion is the local build version
|
||||||
const BuildVersion = "1.23.2"
|
const BuildVersion = "1.23.3"
|
||||||
|
|
||||||
func UserVersion() string {
|
func UserVersion() string {
|
||||||
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
|
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
|
||||||
|
@ -61,6 +61,10 @@ const (
|
|||||||
MaxPreCommitRandomnessLookback = builtin11.EpochsInDay + SealRandomnessLookback
|
MaxPreCommitRandomnessLookback = builtin11.EpochsInDay + SealRandomnessLookback
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
MarketDefaultAllocationTermBuffer = market11.MarketDefaultAllocationTermBuffer
|
||||||
|
)
|
||||||
|
|
||||||
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
||||||
// This should only be used for testing.
|
// This should only be used for testing.
|
||||||
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||||
|
@ -39,6 +39,10 @@ const (
|
|||||||
MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback
|
MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
MarketDefaultAllocationTermBuffer = market{{.latestVersion}}.MarketDefaultAllocationTermBuffer
|
||||||
|
)
|
||||||
|
|
||||||
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
||||||
// This should only be used for testing.
|
// This should only be used for testing.
|
||||||
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||||
|
@ -135,6 +135,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
|
|||||||
return xerrors.Errorf("running cron: %w", err)
|
return xerrors.Errorf("running cron: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !ret.ExitCode.IsSuccess() {
|
||||||
|
return xerrors.Errorf("cron failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
|
||||||
|
}
|
||||||
|
|
||||||
cronGas += ret.GasUsed
|
cronGas += ret.GasUsed
|
||||||
|
|
||||||
if em != nil {
|
if em != nil {
|
||||||
|
@ -80,6 +80,11 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito
|
|||||||
if actErr != nil {
|
if actErr != nil {
|
||||||
return xerrors.Errorf("failed to apply reward message: %w", actErr)
|
return xerrors.Errorf("failed to apply reward message: %w", actErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !ret.ExitCode.IsSuccess() {
|
||||||
|
return xerrors.Errorf("reward actor failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
|
||||||
|
}
|
||||||
|
|
||||||
if em != nil {
|
if em != nil {
|
||||||
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
|
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
|
||||||
return xerrors.Errorf("callback failed on reward message: %w", err)
|
return xerrors.Errorf("callback failed on reward message: %w", err)
|
||||||
|
@ -7,14 +7,17 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
|
logging "github.com/ipfs/go-log/v2"
|
||||||
_ "github.com/mattn/go-sqlite3"
|
_ "github.com/mattn/go-sqlite3"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -42,6 +45,8 @@ var ddls = []string{
|
|||||||
reverted INTEGER NOT NULL
|
reverted INTEGER NOT NULL
|
||||||
)`,
|
)`,
|
||||||
|
|
||||||
|
`CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`,
|
||||||
|
|
||||||
`CREATE TABLE IF NOT EXISTS event_entry (
|
`CREATE TABLE IF NOT EXISTS event_entry (
|
||||||
event_id INTEGER,
|
event_id INTEGER,
|
||||||
indexed INTEGER NOT NULL,
|
indexed INTEGER NOT NULL,
|
||||||
@ -56,27 +61,210 @@ var ddls = []string{
|
|||||||
version UINT64 NOT NULL UNIQUE
|
version UINT64 NOT NULL UNIQUE
|
||||||
)`,
|
)`,
|
||||||
|
|
||||||
// version 1.
|
|
||||||
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
|
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
|
||||||
|
`INSERT OR IGNORE INTO _meta (version) VALUES (2)`,
|
||||||
}
|
}
|
||||||
|
|
||||||
const schemaVersion = 1
|
var (
|
||||||
|
log = logging.Logger("filter")
|
||||||
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
insertEvent = `INSERT OR IGNORE INTO event
|
schemaVersion = 2
|
||||||
(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted)
|
|
||||||
VALUES(?, ?, ?, ?, ?, ?, ?, ?)`
|
|
||||||
|
|
||||||
insertEntry = `INSERT OR IGNORE INTO event_entry
|
eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
|
||||||
(event_id, indexed, flags, key, codec, value)
|
insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)`
|
||||||
VALUES(?, ?, ?, ?, ?, ?)`
|
insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)`
|
||||||
|
revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?`
|
||||||
|
restoreEvent = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
|
||||||
)
|
)
|
||||||
|
|
||||||
type EventIndex struct {
|
type EventIndex struct {
|
||||||
db *sql.DB
|
db *sql.DB
|
||||||
|
|
||||||
|
stmtEventExists *sql.Stmt
|
||||||
|
stmtInsertEvent *sql.Stmt
|
||||||
|
stmtInsertEntry *sql.Stmt
|
||||||
|
stmtRevertEventsInTipset *sql.Stmt
|
||||||
|
stmtRestoreEvent *sql.Stmt
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEventIndex(path string) (*EventIndex, error) {
|
func (ei *EventIndex) initStatements() (err error) {
|
||||||
|
ei.stmtEventExists, err = ei.db.Prepare(eventExists)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtEventExists: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ei.stmtInsertEvent, err = ei.db.Prepare(insertEvent)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtInsertEvent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ei.stmtInsertEntry, err = ei.db.Prepare(insertEntry)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtInsertEntry: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ei.stmtRevertEventsInTipset, err = ei.db.Prepare(revertEventsInTipset)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtRevertEventsInTipset: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ei.stmtRestoreEvent, err = ei.db.Prepare(restoreEvent)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtRestoreEvent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.ChainStore) error {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
tx, err := ei.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("begin transaction: %w", err)
|
||||||
|
}
|
||||||
|
// rollback the transaction (a no-op if the transaction was already committed)
|
||||||
|
defer tx.Rollback() //nolint:errcheck
|
||||||
|
|
||||||
|
// create some temporary indices to help speed up the migration
|
||||||
|
_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err)
|
||||||
|
}
|
||||||
|
_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? and height=?")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stmtSelectEvent, err := tx.Prepare("SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtSelectEvent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stmtDeleteEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid=? AND id<?")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("prepare stmtDeleteEvent: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// get the lowest height tipset
|
||||||
|
var minHeight sql.NullInt64
|
||||||
|
err = ei.db.QueryRow("SELECT MIN(height) FROM event").Scan(&minHeight)
|
||||||
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return xerrors.Errorf("query min height: %w", err)
|
||||||
|
}
|
||||||
|
log.Infof("Migrating events from head to %d", minHeight.Int64)
|
||||||
|
|
||||||
|
currTs := chainStore.GetHeaviestTipSet()
|
||||||
|
|
||||||
|
for int64(currTs.Height()) >= minHeight.Int64 {
|
||||||
|
if currTs.Height()%1000 == 0 {
|
||||||
|
log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64)
|
||||||
|
}
|
||||||
|
|
||||||
|
tsKey := currTs.Parents()
|
||||||
|
currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get tipset from key: %w", err)
|
||||||
|
}
|
||||||
|
log.Debugf("Migrating height %d", currTs.Height())
|
||||||
|
|
||||||
|
tsKeyCid, err := currTs.Key().Cid()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("tipset key cid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// delete all events that are not in the canonical chain
|
||||||
|
_, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height())
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("delete off chain event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// find the first eventId from the last time the tipset was applied
|
||||||
|
var eventId sql.NullInt64
|
||||||
|
err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId)
|
||||||
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return xerrors.Errorf("select event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// this tipset might not have any events which is ok
|
||||||
|
if !eventId.Valid {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height())
|
||||||
|
|
||||||
|
res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("delete event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nrRowsAffected, err := res.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("rows affected: %w", err)
|
||||||
|
}
|
||||||
|
log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// delete all entries that have an event_id that doesn't exist (since we don't have a foreign
|
||||||
|
// key constraint that gives us cascading deletes)
|
||||||
|
res, err := tx.Exec("DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("delete event_entry: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nrRowsAffected, err := res.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("rows affected: %w", err)
|
||||||
|
}
|
||||||
|
log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
|
||||||
|
|
||||||
|
// drop the temporary indices after the migration
|
||||||
|
_, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
|
||||||
|
}
|
||||||
|
_, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// during the migration, we have likely increased the WAL size a lot, so lets do some
|
||||||
|
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
|
||||||
|
// as this would be a good time to do it when no other writes are happening
|
||||||
|
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
|
||||||
|
_, err = ei.db.Exec("VACUUM")
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("error vacuuming database: %s", err)
|
||||||
|
}
|
||||||
|
_, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("error checkpointing wal: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) {
|
||||||
db, err := sql.Open("sqlite3", path+"?mode=rwc")
|
db, err := sql.Open("sqlite3", path+"?mode=rwc")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("open sqlite3 database: %w", err)
|
return nil, xerrors.Errorf("open sqlite3 database: %w", err)
|
||||||
@ -89,6 +277,8 @@ func NewEventIndex(path string) (*EventIndex, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
eventIndex := EventIndex{db: db}
|
||||||
|
|
||||||
q, err := db.Query("SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
|
q, err := db.Query("SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
|
||||||
if err == sql.ErrNoRows || !q.Next() {
|
if err == sql.ErrNoRows || !q.Next() {
|
||||||
// empty database, create the schema
|
// empty database, create the schema
|
||||||
@ -102,24 +292,48 @@ func NewEventIndex(path string) (*EventIndex, error) {
|
|||||||
_ = db.Close()
|
_ = db.Close()
|
||||||
return nil, xerrors.Errorf("looking for _meta table: %w", err)
|
return nil, xerrors.Errorf("looking for _meta table: %w", err)
|
||||||
} else {
|
} else {
|
||||||
// Ensure we don't open a database from a different schema version
|
// check the schema version to see if we need to upgrade the database schema
|
||||||
|
|
||||||
row := db.QueryRow("SELECT max(version) FROM _meta")
|
|
||||||
var version int
|
var version int
|
||||||
err := row.Scan(&version)
|
err := db.QueryRow("SELECT max(version) FROM _meta").Scan(&version)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = db.Close()
|
_ = db.Close()
|
||||||
return nil, xerrors.Errorf("invalid database version: no version found")
|
return nil, xerrors.Errorf("invalid database version: no version found")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if version == 1 {
|
||||||
|
log.Infof("upgrading event index from version 1 to version 2")
|
||||||
|
|
||||||
|
err = eventIndex.migrateToVersion2(ctx, chainStore)
|
||||||
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
|
return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// to upgrade to version version 2 we only need to create an index on the event table
|
||||||
|
// which means we can just recreate the schema (it will not have any effect on existing data)
|
||||||
|
for _, ddl := range ddls {
|
||||||
|
if _, err := db.Exec(ddl); err != nil {
|
||||||
|
_ = db.Close()
|
||||||
|
return nil, xerrors.Errorf("could not upgrade index to version 2, exec ddl %q: %w", ddl, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
version = 2
|
||||||
|
}
|
||||||
|
|
||||||
if version != schemaVersion {
|
if version != schemaVersion {
|
||||||
_ = db.Close()
|
_ = db.Close()
|
||||||
return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion)
|
return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &EventIndex{
|
err = eventIndex.initStatements()
|
||||||
db: db,
|
if err != nil {
|
||||||
}, nil
|
_ = db.Close()
|
||||||
|
return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &eventIndex, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ei *EventIndex) Close() error {
|
func (ei *EventIndex) Close() error {
|
||||||
@ -130,8 +344,29 @@ func (ei *EventIndex) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error {
|
func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error {
|
||||||
// cache of lookups between actor id and f4 address
|
tx, err := ei.db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("begin transaction: %w", err)
|
||||||
|
}
|
||||||
|
// rollback the transaction (a no-op if the transaction was already committed)
|
||||||
|
defer tx.Rollback() //nolint:errcheck
|
||||||
|
|
||||||
|
// lets handle the revert case first, since its simpler and we can simply mark all events events in this tipset as reverted and return
|
||||||
|
if revert {
|
||||||
|
_, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("revert event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cache of lookups between actor id and f4 address
|
||||||
addressLookups := make(map[abi.ActorID]address.Address)
|
addressLookups := make(map[abi.ActorID]address.Address)
|
||||||
|
|
||||||
ems, err := te.messages(ctx)
|
ems, err := te.messages(ctx)
|
||||||
@ -139,19 +374,8 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
|
|||||||
return xerrors.Errorf("load executed messages: %w", err)
|
return xerrors.Errorf("load executed messages: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
tx, err := ei.db.Begin()
|
// iterate over all executed messages in this tipset and insert them into the database if they
|
||||||
if err != nil {
|
// don't exist, otherwise mark them as not reverted
|
||||||
return xerrors.Errorf("begin transaction: %w", err)
|
|
||||||
}
|
|
||||||
stmtEvent, err := tx.Prepare(insertEvent)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("prepare insert event: %w", err)
|
|
||||||
}
|
|
||||||
stmtEntry, err := tx.Prepare(insertEntry)
|
|
||||||
if err != nil {
|
|
||||||
return xerrors.Errorf("prepare insert entry: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for msgIdx, em := range ems {
|
for msgIdx, em := range ems {
|
||||||
for evIdx, ev := range em.Events() {
|
for evIdx, ev := range em.Events() {
|
||||||
addr, found := addressLookups[ev.Emitter]
|
addr, found := addressLookups[ev.Emitter]
|
||||||
@ -170,7 +394,9 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
|
|||||||
return xerrors.Errorf("tipset key cid: %w", err)
|
return xerrors.Errorf("tipset key cid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := stmtEvent.Exec(
|
// check if this event already exists in the database
|
||||||
|
var entryID sql.NullInt64
|
||||||
|
err = tx.Stmt(ei.stmtEventExists).QueryRow(
|
||||||
te.msgTs.Height(), // height
|
te.msgTs.Height(), // height
|
||||||
te.msgTs.Key().Bytes(), // tipset_key
|
te.msgTs.Key().Bytes(), // tipset_key
|
||||||
tsKeyCid.Bytes(), // tipset_key_cid
|
tsKeyCid.Bytes(), // tipset_key_cid
|
||||||
@ -178,34 +404,76 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
|
|||||||
evIdx, // event_index
|
evIdx, // event_index
|
||||||
em.Message().Cid().Bytes(), // message_cid
|
em.Message().Cid().Bytes(), // message_cid
|
||||||
msgIdx, // message_index
|
msgIdx, // message_index
|
||||||
revert, // reverted
|
).Scan(&entryID)
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("exec insert event: %w", err)
|
return xerrors.Errorf("error checking if event exists: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
lastID, err := res.LastInsertId()
|
if !entryID.Valid {
|
||||||
if err != nil {
|
// event does not exist, lets insert it
|
||||||
return xerrors.Errorf("get last row id: %w", err)
|
res, err := tx.Stmt(ei.stmtInsertEvent).Exec(
|
||||||
}
|
te.msgTs.Height(), // height
|
||||||
|
te.msgTs.Key().Bytes(), // tipset_key
|
||||||
for _, entry := range ev.Entries {
|
tsKeyCid.Bytes(), // tipset_key_cid
|
||||||
_, err := stmtEntry.Exec(
|
addr.Bytes(), // emitter_addr
|
||||||
lastID, // event_id
|
evIdx, // event_index
|
||||||
isIndexedValue(entry.Flags), // indexed
|
em.Message().Cid().Bytes(), // message_cid
|
||||||
[]byte{entry.Flags}, // flags
|
msgIdx, // message_index
|
||||||
entry.Key, // key
|
false, // reverted
|
||||||
entry.Codec, // codec
|
|
||||||
entry.Value, // value
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("exec insert entry: %w", err)
|
return xerrors.Errorf("exec insert event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
entryID.Int64, err = res.LastInsertId()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("get last row id: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert all the entries for this event
|
||||||
|
for _, entry := range ev.Entries {
|
||||||
|
_, err = tx.Stmt(ei.stmtInsertEntry).Exec(
|
||||||
|
entryID.Int64, // event_id
|
||||||
|
isIndexedValue(entry.Flags), // indexed
|
||||||
|
[]byte{entry.Flags}, // flags
|
||||||
|
entry.Key, // key
|
||||||
|
entry.Codec, // codec
|
||||||
|
entry.Value, // value
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("exec insert entry: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// event already exists, lets mark it as not reverted
|
||||||
|
res, err := tx.Stmt(ei.stmtRestoreEvent).Exec(
|
||||||
|
te.msgTs.Height(), // height
|
||||||
|
te.msgTs.Key().Bytes(), // tipset_key
|
||||||
|
tsKeyCid.Bytes(), // tipset_key_cid
|
||||||
|
addr.Bytes(), // emitter_addr
|
||||||
|
evIdx, // event_index
|
||||||
|
em.Message().Cid().Bytes(), // message_cid
|
||||||
|
msgIdx, // message_index
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("exec restore event: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rowsAffected, err := res.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("error getting rows affected: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// this is a sanity check as we should only ever be updating one event
|
||||||
|
if rowsAffected != 1 {
|
||||||
|
log.Warnf("restored %d events but expected only one to exist", rowsAffected)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
return xerrors.Errorf("commit transaction: %w", err)
|
return xerrors.Errorf("commit transaction: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -74,7 +74,7 @@ func TestEventIndexPrefillFilter(t *testing.T) {
|
|||||||
|
|
||||||
dbPath := filepath.Join(workDir, "actorevents.db")
|
dbPath := filepath.Join(workDir, "actorevents.db")
|
||||||
|
|
||||||
ei, err := NewEventIndex(dbPath)
|
ei, err := NewEventIndex(context.Background(), dbPath, nil)
|
||||||
require.NoError(t, err, "create event index")
|
require.NoError(t, err, "create event index")
|
||||||
if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil {
|
if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil {
|
||||||
require.NoError(t, err, "collect events")
|
require.NoError(t, err, "collect events")
|
||||||
|
@ -125,7 +125,7 @@ func (o *observer) listenHeadChangesOnce(ctx context.Context) error {
|
|||||||
|
|
||||||
for changes := range notifs {
|
for changes := range notifs {
|
||||||
if err := o.applyChanges(ctx, changes); err != nil {
|
if err := o.applyChanges(ctx, changes); err != nil {
|
||||||
return err
|
return xerrors.Errorf("failed to apply a change notification: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,12 +10,12 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/ipfs/go-blockservice"
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
format "github.com/ipfs/go-ipld-format"
|
format "github.com/ipfs/go-ipld-format"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
@ -11,7 +11,6 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -27,24 +26,30 @@ func New(dstore ds.Batching) *SlashFilter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error {
|
func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, bool, error) {
|
||||||
if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
|
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
|
||||||
{
|
{
|
||||||
// double-fork mining (2 blocks at one epoch)
|
// double-fork mining (2 blocks at one epoch)
|
||||||
if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil {
|
doubleForkWitness, doubleForkFault, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults")
|
||||||
return err
|
if err != nil {
|
||||||
|
return cid.Undef, false, xerrors.Errorf("check double-fork mining faults: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if doubleForkFault {
|
||||||
|
return doubleForkWitness, doubleForkFault, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes()))
|
parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes()))
|
||||||
{
|
{
|
||||||
// time-offset mining faults (2 blocks with the same parents)
|
// time-offset mining faults (2 blocks with the same parents)
|
||||||
if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil {
|
timeOffsetWitness, timeOffsetFault, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults")
|
||||||
return err
|
if err != nil {
|
||||||
|
return cid.Undef, false, xerrors.Errorf("check time-offset mining faults: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if timeOffsetFault {
|
||||||
|
return timeOffsetWitness, timeOffsetFault, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -55,19 +60,19 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
|
|||||||
parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch))
|
parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch))
|
||||||
have, err := f.byEpoch.Has(ctx, parentEpochKey)
|
have, err := f.byEpoch.Has(ctx, parentEpochKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return cid.Undef, false, xerrors.Errorf("failed to read from db: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if have {
|
if have {
|
||||||
// If we had, make sure it's in our parent tipset
|
// If we had, make sure it's in our parent tipset
|
||||||
cidb, err := f.byEpoch.Get(ctx, parentEpochKey)
|
cidb, err := f.byEpoch.Get(ctx, parentEpochKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("getting other block cid: %w", err)
|
return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, parent, err := cid.CidFromBytes(cidb)
|
_, parent, err := cid.CidFromBytes(cidb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return cid.Undef, false, xerrors.Errorf("failed to read cid from bytes: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var found bool
|
var found bool
|
||||||
@ -78,45 +83,45 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !found {
|
if !found {
|
||||||
return xerrors.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent)
|
return parent, true, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil {
|
if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil {
|
||||||
return xerrors.Errorf("putting byEpoch entry: %w", err)
|
return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil {
|
if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil {
|
||||||
return xerrors.Errorf("putting byEpoch entry: %w", err)
|
return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return cid.Undef, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error {
|
func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, bool, error) {
|
||||||
fault, err := t.Has(ctx, key)
|
fault, err := t.Has(ctx, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return cid.Undef, false, xerrors.Errorf("failed to read from datastore: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if fault {
|
if fault {
|
||||||
cidb, err := t.Get(ctx, key)
|
cidb, err := t.Get(ctx, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("getting other block cid: %w", err)
|
return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, other, err := cid.CidFromBytes(cidb)
|
_, other, err := cid.CidFromBytes(cidb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return cid.Undef, false, xerrors.Errorf("failed to read cid of other block: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if other == bh.Cid() {
|
if other == bh.Cid() {
|
||||||
return nil
|
return cid.Undef, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other)
|
return other, true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return cid.Undef, false, nil
|
||||||
}
|
}
|
||||||
|
179
chain/gen/slashfilter/slashsvc/slashservice.go
Normal file
179
chain/gen/slashfilter/slashsvc/slashservice.go
Normal file
@ -0,0 +1,179 @@
|
|||||||
|
package slashsvc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
levelds "github.com/ipfs/go-ds-leveldb"
|
||||||
|
logging "github.com/ipfs/go-log/v2"
|
||||||
|
ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
cborutil "github.com/filecoin-project/go-cbor-util"
|
||||||
|
"github.com/filecoin-project/go-state-types/builtin"
|
||||||
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||||
|
|
||||||
|
lapi "github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
var log = logging.Logger("slashsvc")
|
||||||
|
|
||||||
|
type ConsensusSlasherApi interface {
|
||||||
|
ChainHead(context.Context) (*types.TipSet, error)
|
||||||
|
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
|
||||||
|
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *lapi.MessageSendSpec) (*types.SignedMessage, error)
|
||||||
|
SyncIncomingBlocks(context.Context) (<-chan *types.BlockHeader, error)
|
||||||
|
WalletDefaultAddress(context.Context) (address.Address, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func SlashConsensus(ctx context.Context, a ConsensusSlasherApi, p string, from string) error {
|
||||||
|
var fromAddr address.Address
|
||||||
|
|
||||||
|
ds, err := levelds.NewDatastore(p, &levelds.Options{
|
||||||
|
Compression: ldbopts.NoCompression,
|
||||||
|
NoSync: false,
|
||||||
|
Strict: ldbopts.StrictAll,
|
||||||
|
ReadOnly: false,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("open leveldb: %w", err)
|
||||||
|
}
|
||||||
|
sf := slashfilter.New(ds)
|
||||||
|
if from == "" {
|
||||||
|
defaddr, err := a.WalletDefaultAddress(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fromAddr = defaddr
|
||||||
|
} else {
|
||||||
|
addr, err := address.NewFromString(from)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fromAddr = addr
|
||||||
|
}
|
||||||
|
|
||||||
|
blocks, err := a.SyncIncomingBlocks(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("sync incoming blocks failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infow("consensus fault reporter", "from", fromAddr)
|
||||||
|
go func() {
|
||||||
|
for block := range blocks {
|
||||||
|
otherBlock, extraBlock, fault, err := slashFilterMinedBlock(ctx, sf, a, block)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("slash detector errored: %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fault {
|
||||||
|
log.Errorf("<!!> SLASH FILTER DETECTED FAULT DUE TO BLOCKS %s and %s", otherBlock.Cid(), block.Cid())
|
||||||
|
bh1, err := cborutil.Dump(otherBlock)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not dump otherblock:%s, err:%s", otherBlock.Cid(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
bh2, err := cborutil.Dump(block)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
params := miner.ReportConsensusFaultParams{
|
||||||
|
BlockHeader1: bh1,
|
||||||
|
BlockHeader2: bh2,
|
||||||
|
}
|
||||||
|
if extraBlock != nil {
|
||||||
|
be, err := cborutil.Dump(extraBlock)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
params.BlockHeaderExtra = be
|
||||||
|
}
|
||||||
|
|
||||||
|
enc, err := actors.SerializeParams(¶ms)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("could not serialize declare faults parameters: %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
head, err := a.ChainHead(ctx)
|
||||||
|
if err != nil || head.Height() > block.Height {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second * 10)
|
||||||
|
}
|
||||||
|
message, err := a.MpoolPushMessage(ctx, &types.Message{
|
||||||
|
To: block.Miner,
|
||||||
|
From: fromAddr,
|
||||||
|
Value: types.NewInt(0),
|
||||||
|
Method: builtin.MethodsMiner.ReportConsensusFault,
|
||||||
|
Params: enc,
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("ReportConsensusFault to messagepool error:%s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.Infof("ReportConsensusFault message CID:%s", message.Cid())
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func slashFilterMinedBlock(ctx context.Context, sf *slashfilter.SlashFilter, a ConsensusSlasherApi, blockB *types.BlockHeader) (*types.BlockHeader, *types.BlockHeader, bool, error) {
|
||||||
|
blockC, err := a.ChainGetBlock(ctx, blockB.Parents[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, false, xerrors.Errorf("chain get block error:%s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
blockACid, fault, err := sf.MinedBlock(ctx, blockB, blockC.Height)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, false, xerrors.Errorf("slash filter check block error:%s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !fault {
|
||||||
|
return nil, nil, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
blockA, err := a.ChainGetBlock(ctx, blockACid)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, false, xerrors.Errorf("failed to get blockA: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// (a) double-fork mining (2 blocks at one epoch)
|
||||||
|
if blockA.Height == blockB.Height {
|
||||||
|
return blockA, nil, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// (b) time-offset mining faults (2 blocks with the same parents)
|
||||||
|
if types.CidArrsEqual(blockB.Parents, blockA.Parents) {
|
||||||
|
return blockA, nil, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// (c) parent-grinding fault
|
||||||
|
// Here extra is the "witness", a third block that shows the connection between A and B as
|
||||||
|
// A's sibling and B's parent.
|
||||||
|
// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
|
||||||
|
//
|
||||||
|
// B
|
||||||
|
// |
|
||||||
|
// [A, C]
|
||||||
|
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
|
||||||
|
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
|
||||||
|
return blockA, blockC, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Error("unexpectedly reached end of slashFilterMinedBlock despite fault being reported!")
|
||||||
|
return nil, nil, false, nil
|
||||||
|
}
|
@ -37,7 +37,17 @@ var dbDefs = []string{
|
|||||||
)`,
|
)`,
|
||||||
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
|
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
|
||||||
}
|
}
|
||||||
var dbPragmas = []string{}
|
|
||||||
|
var dbPragmas = []string{
|
||||||
|
"PRAGMA synchronous = normal",
|
||||||
|
"PRAGMA temp_store = memory",
|
||||||
|
"PRAGMA mmap_size = 30000000000",
|
||||||
|
"PRAGMA page_size = 32768",
|
||||||
|
"PRAGMA auto_vacuum = NONE",
|
||||||
|
"PRAGMA automatic_index = OFF",
|
||||||
|
"PRAGMA journal_mode = WAL",
|
||||||
|
"PRAGMA read_uncommitted = ON",
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// prepared stmts
|
// prepared stmts
|
||||||
|
@ -39,10 +39,10 @@ func TestBasicMsgIndex(t *testing.T) {
|
|||||||
t.Logf("advance to epoch %d", i+1)
|
t.Logf("advance to epoch %d", i+1)
|
||||||
err := cs.advance()
|
err := cs.advance()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// wait for the coalescer to notify
|
|
||||||
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
waitForCoalescerAfterLastEvent()
|
||||||
|
|
||||||
t.Log("verifying index")
|
t.Log("verifying index")
|
||||||
verifyIndex(t, cs, msgIndex)
|
verifyIndex(t, cs, msgIndex)
|
||||||
}
|
}
|
||||||
@ -51,7 +51,7 @@ func TestReorgMsgIndex(t *testing.T) {
|
|||||||
// slightly more nuanced test that includes reorgs
|
// slightly more nuanced test that includes reorgs
|
||||||
// 1. Create an index with mock chain store
|
// 1. Create an index with mock chain store
|
||||||
// 2. Advance/Reorg the chain for a few tipsets
|
// 2. Advance/Reorg the chain for a few tipsets
|
||||||
// 3. Verify that the index contains all messages with the correct tipst/epoch
|
// 3. Verify that the index contains all messages with the correct tipset/epoch
|
||||||
cs := newMockChainStore()
|
cs := newMockChainStore()
|
||||||
cs.genesis()
|
cs.genesis()
|
||||||
|
|
||||||
@ -67,10 +67,10 @@ func TestReorgMsgIndex(t *testing.T) {
|
|||||||
t.Logf("advance to epoch %d", i+1)
|
t.Logf("advance to epoch %d", i+1)
|
||||||
err := cs.advance()
|
err := cs.advance()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// wait for the coalescer to notify
|
|
||||||
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
waitForCoalescerAfterLastEvent()
|
||||||
|
|
||||||
// a simple reorg
|
// a simple reorg
|
||||||
t.Log("doing reorg")
|
t.Log("doing reorg")
|
||||||
reorgme := cs.curTs
|
reorgme := cs.curTs
|
||||||
@ -80,7 +80,8 @@ func TestReorgMsgIndex(t *testing.T) {
|
|||||||
reorgmeChild := cs.makeBlk()
|
reorgmeChild := cs.makeBlk()
|
||||||
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
|
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
|
|
||||||
|
waitForCoalescerAfterLastEvent()
|
||||||
|
|
||||||
t.Log("verifying index")
|
t.Log("verifying index")
|
||||||
verifyIndex(t, cs, msgIndex)
|
verifyIndex(t, cs, msgIndex)
|
||||||
@ -109,10 +110,10 @@ func TestReconcileMsgIndex(t *testing.T) {
|
|||||||
t.Logf("advance to epoch %d", i+1)
|
t.Logf("advance to epoch %d", i+1)
|
||||||
err := cs.advance()
|
err := cs.advance()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
// wait for the coalescer to notify
|
|
||||||
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
waitForCoalescerAfterLastEvent()
|
||||||
|
|
||||||
// Close it and reorg
|
// Close it and reorg
|
||||||
err = msgIndex.Close()
|
err = msgIndex.Close()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -296,3 +297,11 @@ func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSet
|
|||||||
}
|
}
|
||||||
return ts, nil
|
return ts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func waitForCoalescerAfterLastEvent() {
|
||||||
|
// It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event.
|
||||||
|
// When the timer fires, it can wait up to CoalesceMinDelay again for more events.
|
||||||
|
// Therefore the total wait is 2 * CoalesceMinDelay.
|
||||||
|
// Then we wait another second for the listener (the index) to actually process events.
|
||||||
|
time.Sleep(2*CoalesceMinDelay + time.Second)
|
||||||
|
}
|
||||||
|
@ -35,8 +35,8 @@ func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Ad
|
|||||||
mp.lk.RLock()
|
mp.lk.RLock()
|
||||||
mset, ok, err := mp.getPendingMset(ctx, from)
|
mset, ok, err := mp.getPendingMset(ctx, from)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("errored while getting pending mset: %w", err)
|
mp.lk.RUnlock()
|
||||||
return nil, err
|
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
|
||||||
}
|
}
|
||||||
if ok {
|
if ok {
|
||||||
msgs = make([]*types.Message, 0, len(mset.msgs))
|
msgs = make([]*types.Message, 0, len(mset.msgs))
|
||||||
@ -71,8 +71,8 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type
|
|||||||
msgMap[m.From] = mmap
|
msgMap[m.From] = mmap
|
||||||
mset, ok, err := mp.getPendingMset(ctx, m.From)
|
mset, ok, err := mp.getPendingMset(ctx, m.From)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("errored while getting pending mset: %w", err)
|
mp.lk.RUnlock()
|
||||||
return nil, err
|
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
|
||||||
}
|
}
|
||||||
if ok {
|
if ok {
|
||||||
count += len(mset.msgs)
|
count += len(mset.msgs)
|
||||||
@ -155,8 +155,8 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
|
|||||||
mp.lk.RLock()
|
mp.lk.RLock()
|
||||||
mset, ok, err := mp.getPendingMset(ctx, m.From)
|
mset, ok, err := mp.getPendingMset(ctx, m.From)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("errored while getting pending mset: %w", err)
|
mp.lk.RUnlock()
|
||||||
return nil, err
|
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
|
||||||
}
|
}
|
||||||
if ok && !interned {
|
if ok && !interned {
|
||||||
st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds}
|
st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds}
|
||||||
|
@ -448,12 +448,8 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
|
|||||||
return mp, nil
|
return mp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mp *MessagePool) TryForEachPendingMessage(f func(cid.Cid) error) error {
|
func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
|
||||||
// avoid deadlocks in splitstore compaction when something else needs to access the blockstore
|
mp.lk.Lock()
|
||||||
// while holding the mpool lock
|
|
||||||
if !mp.lk.TryLock() {
|
|
||||||
return xerrors.Errorf("mpool TryForEachPendingMessage: could not acquire lock")
|
|
||||||
}
|
|
||||||
defer mp.lk.Unlock()
|
defer mp.lk.Unlock()
|
||||||
|
|
||||||
for _, mset := range mp.pending {
|
for _, mset := range mp.pending {
|
||||||
@ -749,8 +745,7 @@ func (mp *MessagePool) checkMessage(ctx context.Context, m *types.SignedMessage)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := mp.VerifyMsgSig(m); err != nil {
|
if err := mp.VerifyMsgSig(m); err != nil {
|
||||||
log.Warnf("signature verification failed: %s", err)
|
return xerrors.Errorf("signature verification failed: %s", err)
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -969,13 +964,11 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
|
|||||||
}
|
}
|
||||||
|
|
||||||
if _, err := mp.api.PutMessage(ctx, m); err != nil {
|
if _, err := mp.api.PutMessage(ctx, m); err != nil {
|
||||||
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
|
return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil {
|
if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil {
|
||||||
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
|
return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
|
// Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
|
||||||
|
@ -11,9 +11,11 @@ import (
|
|||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
big2 "github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/go-state-types/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
"github.com/filecoin-project/go-state-types/network"
|
"github.com/filecoin-project/go-state-types/network"
|
||||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||||
@ -524,6 +526,36 @@ func TestPruningSimple(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGasRewardNegative(t *testing.T) {
|
||||||
|
var mp MessagePool
|
||||||
|
|
||||||
|
msg := types.SignedMessage{
|
||||||
|
Message: types.Message{
|
||||||
|
GasLimit: 1000,
|
||||||
|
GasFeeCap: big2.NewInt(20000),
|
||||||
|
GasPremium: big2.NewInt(15000),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
baseFee := big2.NewInt(30000)
|
||||||
|
// Over the GasPremium, but under the BaseFee
|
||||||
|
gr1 := mp.getGasReward(&msg, baseFee)
|
||||||
|
|
||||||
|
msg.Message.GasFeeCap = big2.NewInt(15000)
|
||||||
|
// Equal to GasPremium, under the BaseFee
|
||||||
|
gr2 := mp.getGasReward(&msg, baseFee)
|
||||||
|
|
||||||
|
msg.Message.GasFeeCap = big2.NewInt(10000)
|
||||||
|
// Under both GasPremium and BaseFee
|
||||||
|
gr3 := mp.getGasReward(&msg, baseFee)
|
||||||
|
|
||||||
|
require.True(t, gr1.Sign() < 0)
|
||||||
|
require.True(t, gr2.Sign() < 0)
|
||||||
|
require.True(t, gr3.Sign() < 0)
|
||||||
|
|
||||||
|
require.True(t, gr1.Cmp(gr2) > 0)
|
||||||
|
require.True(t, gr2.Cmp(gr3) > 0)
|
||||||
|
}
|
||||||
|
|
||||||
func TestLoadLocal(t *testing.T) {
|
func TestLoadLocal(t *testing.T) {
|
||||||
tma := newTestMpoolAPI()
|
tma := newTestMpoolAPI()
|
||||||
ds := datastore.NewMapDatastore()
|
ds := datastore.NewMapDatastore()
|
||||||
|
@ -13,6 +13,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ipfs/go-datastore"
|
"github.com/ipfs/go-datastore"
|
||||||
@ -1690,3 +1691,188 @@ readLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRealWorldSelectionTiming(t *testing.T) {
|
||||||
|
//stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001
|
||||||
|
|
||||||
|
// load test-messages.json.gz and rewrite the messages so that
|
||||||
|
// 1) we map each real actor to a test actor so that we can sign the messages
|
||||||
|
// 2) adjust the nonces so that they start from 0
|
||||||
|
file, err := os.Open("test-messages2.json.gz")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
gzr, err := gzip.NewReader(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dec := json.NewDecoder(gzr)
|
||||||
|
|
||||||
|
var msgs []*types.SignedMessage
|
||||||
|
baseNonces := make(map[address.Address]uint64)
|
||||||
|
|
||||||
|
readLoop:
|
||||||
|
for {
|
||||||
|
m := new(types.SignedMessage)
|
||||||
|
err := dec.Decode(m)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
msgs = append(msgs, m)
|
||||||
|
nonce, ok := baseNonces[m.Message.From]
|
||||||
|
if !ok || m.Message.Nonce < nonce {
|
||||||
|
baseNonces[m.Message.From] = m.Message.Nonce
|
||||||
|
}
|
||||||
|
|
||||||
|
case io.EOF:
|
||||||
|
break readLoop
|
||||||
|
|
||||||
|
default:
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actorMap := make(map[address.Address]address.Address)
|
||||||
|
actorWallets := make(map[address.Address]api.Wallet)
|
||||||
|
|
||||||
|
for _, m := range msgs {
|
||||||
|
baseNonce := baseNonces[m.Message.From]
|
||||||
|
|
||||||
|
localActor, ok := actorMap[m.Message.From]
|
||||||
|
if !ok {
|
||||||
|
w, err := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
a, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
actorMap[m.Message.From] = a
|
||||||
|
actorWallets[a] = w
|
||||||
|
localActor = a
|
||||||
|
}
|
||||||
|
|
||||||
|
w, ok := actorWallets[localActor]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("failed to lookup wallet for actor %s", localActor)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Message.From = localActor
|
||||||
|
m.Message.Nonce -= baseNonce
|
||||||
|
|
||||||
|
sig, err := w.WalletSign(context.TODO(), localActor, m.Message.Cid().Bytes(), api.MsgMeta{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Signature = *sig
|
||||||
|
}
|
||||||
|
|
||||||
|
mp, tma := makeTestMpool()
|
||||||
|
|
||||||
|
block := tma.nextBlockWithHeight(uint64(build.UpgradeHyggeHeight) + 10)
|
||||||
|
ts := mock.TipSet(block)
|
||||||
|
tma.applyBlock(t, block)
|
||||||
|
|
||||||
|
for _, a := range actorMap {
|
||||||
|
tma.setBalance(a, 1000000)
|
||||||
|
}
|
||||||
|
|
||||||
|
tma.baseFee = types.NewInt(800_000_000)
|
||||||
|
|
||||||
|
sort.Slice(msgs, func(i, j int) bool {
|
||||||
|
return msgs[i].Message.Nonce < msgs[j].Message.Nonce
|
||||||
|
})
|
||||||
|
|
||||||
|
// add the messages
|
||||||
|
for _, m := range msgs {
|
||||||
|
mustAdd(t, mp, m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// do message selection and check block packing
|
||||||
|
minGasLimit := int64(0.9 * float64(build.BlockGasLimit))
|
||||||
|
|
||||||
|
// greedy first
|
||||||
|
start := time.Now()
|
||||||
|
selected, err := mp.SelectMessages(context.Background(), ts, 1.0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("selected %d messages in %s", len(selected), time.Since(start))
|
||||||
|
|
||||||
|
gasLimit := int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=1.0; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// high quality ticket
|
||||||
|
start = time.Now()
|
||||||
|
selected, err = mp.SelectMessages(context.Background(), ts, .8)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("selected %d messages in %s", len(selected), time.Since(start))
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// mid quality ticket
|
||||||
|
start = time.Now()
|
||||||
|
selected, err = mp.SelectMessages(context.Background(), ts, .4)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("selected %d messages in %s", len(selected), time.Since(start))
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// low quality ticket
|
||||||
|
start = time.Now()
|
||||||
|
selected, err = mp.SelectMessages(context.Background(), ts, .1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("selected %d messages in %s", len(selected), time.Since(start))
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
// very low quality ticket
|
||||||
|
start = time.Now()
|
||||||
|
selected, err = mp.SelectMessages(context.Background(), ts, .01)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
t.Logf("selected %d messages in %s", len(selected), time.Since(start))
|
||||||
|
|
||||||
|
gasLimit = int64(0)
|
||||||
|
for _, m := range selected {
|
||||||
|
gasLimit += m.Message.GasLimit
|
||||||
|
}
|
||||||
|
if gasLimit < minGasLimit {
|
||||||
|
t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
BIN
chain/messagepool/test-messages2.json.gz
Normal file
BIN
chain/messagepool/test-messages2.json.gz
Normal file
Binary file not shown.
@ -131,15 +131,17 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
|
|||||||
tsKey := ts.Key()
|
tsKey := ts.Key()
|
||||||
|
|
||||||
// check if we have the trace for this tipset in the cache
|
// check if we have the trace for this tipset in the cache
|
||||||
sm.execTraceCacheLock.Lock()
|
if execTraceCacheSize > 0 {
|
||||||
if entry, ok := sm.execTraceCache.Get(tsKey); ok {
|
sm.execTraceCacheLock.Lock()
|
||||||
// we have to make a deep copy since caller can modify the invocTrace
|
if entry, ok := sm.execTraceCache.Get(tsKey); ok {
|
||||||
// and we don't want that to change what we store in cache
|
// we have to make a deep copy since caller can modify the invocTrace
|
||||||
invocTraceCopy := makeDeepCopy(entry.invocTrace)
|
// and we don't want that to change what we store in cache
|
||||||
|
invocTraceCopy := makeDeepCopy(entry.invocTrace)
|
||||||
|
sm.execTraceCacheLock.Unlock()
|
||||||
|
return entry.postStateRoot, invocTraceCopy, nil
|
||||||
|
}
|
||||||
sm.execTraceCacheLock.Unlock()
|
sm.execTraceCacheLock.Unlock()
|
||||||
return entry.postStateRoot, invocTraceCopy, nil
|
|
||||||
}
|
}
|
||||||
sm.execTraceCacheLock.Unlock()
|
|
||||||
|
|
||||||
var invocTrace []*api.InvocResult
|
var invocTrace []*api.InvocResult
|
||||||
st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
|
st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
|
||||||
@ -147,11 +149,13 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
|
|||||||
return cid.Undef, nil, err
|
return cid.Undef, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
invocTraceCopy := makeDeepCopy(invocTrace)
|
if execTraceCacheSize > 0 {
|
||||||
|
invocTraceCopy := makeDeepCopy(invocTrace)
|
||||||
|
|
||||||
sm.execTraceCacheLock.Lock()
|
sm.execTraceCacheLock.Lock()
|
||||||
sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy})
|
sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy})
|
||||||
sm.execTraceCacheLock.Unlock()
|
sm.execTraceCacheLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
return st, invocTrace, nil
|
return st, invocTrace, nil
|
||||||
}
|
}
|
||||||
|
@ -3,6 +3,8 @@ package stmgr
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru/v2"
|
lru "github.com/hashicorp/golang-lru/v2"
|
||||||
@ -40,8 +42,7 @@ import (
|
|||||||
const LookbackNoLimit = api.LookbackNoLimit
|
const LookbackNoLimit = api.LookbackNoLimit
|
||||||
const ReceiptAmtBitwidth = 3
|
const ReceiptAmtBitwidth = 3
|
||||||
|
|
||||||
const execTraceCacheSize = 16
|
var execTraceCacheSize = 16
|
||||||
|
|
||||||
var log = logging.Logger("statemgr")
|
var log = logging.Logger("statemgr")
|
||||||
|
|
||||||
type StateManagerAPI interface {
|
type StateManagerAPI interface {
|
||||||
@ -74,6 +75,17 @@ func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key {
|
|||||||
return dstore.NewKey(kStr)
|
return dstore.NewKey(kStr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if s := os.Getenv("LOTUS_EXEC_TRACE_CACHE_SIZE"); s != "" {
|
||||||
|
letc, err := strconv.Atoi(s)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to parse 'LOTUS_EXEC_TRACE_CACHE_SIZE' env var: %s", err)
|
||||||
|
} else {
|
||||||
|
execTraceCacheSize = letc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
|
func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
|
||||||
k := m.keyForMigration(root)
|
k := m.keyForMigration(root)
|
||||||
|
|
||||||
@ -200,9 +212,14 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
execTraceCache, err := lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
|
log.Debugf("execTraceCache size: %d", execTraceCacheSize)
|
||||||
if err != nil {
|
var execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
|
||||||
return nil, err
|
var err error
|
||||||
|
if execTraceCacheSize > 0 {
|
||||||
|
execTraceCache, err = lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &StateManager{
|
return &StateManager{
|
||||||
|
@ -72,7 +72,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
|
|||||||
|
|
||||||
base, trace, err := sm.ExecutionTrace(ctx, ts)
|
base, trace, err := sm.ExecutionTrace(ctx, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cid.Undef, nil, err
|
return cid.Undef, nil, xerrors.Errorf("failed to compute base state: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := ts.Height(); i < height; i++ {
|
for i := ts.Height(); i < height; i++ {
|
||||||
@ -116,6 +116,21 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
|
|||||||
if ret.ExitCode != 0 {
|
if ret.ExitCode != 0 {
|
||||||
log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr)
|
log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ir := &api.InvocResult{
|
||||||
|
MsgCid: msg.Cid(),
|
||||||
|
Msg: msg,
|
||||||
|
MsgRct: &ret.MessageReceipt,
|
||||||
|
ExecutionTrace: ret.ExecutionTrace,
|
||||||
|
Duration: ret.Duration,
|
||||||
|
}
|
||||||
|
if ret.ActorErr != nil {
|
||||||
|
ir.Error = ret.ActorErr.Error()
|
||||||
|
}
|
||||||
|
if ret.GasCosts != nil {
|
||||||
|
ir.GasCost = MakeMsgGasCost(msg, ret)
|
||||||
|
}
|
||||||
|
trace = append(trace, ir)
|
||||||
}
|
}
|
||||||
|
|
||||||
root, err := vmi.Flush(ctx)
|
root, err := vmi.Flush(ctx)
|
||||||
|
@ -425,6 +425,11 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
|||||||
}
|
}
|
||||||
|
|
||||||
defer cs.heaviestLk.Unlock()
|
defer cs.heaviestLk.Unlock()
|
||||||
|
|
||||||
|
if ts.Equals(cs.heaviest) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
|
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -8,11 +8,11 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru/v2"
|
lru "github.com/hashicorp/golang-lru/v2"
|
||||||
|
bserv "github.com/ipfs/boxo/blockservice"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
bserv "github.com/ipfs/go-blockservice"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/ipni/storetheindex/announce/message"
|
"github.com/ipni/go-libipni/announce/message"
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
@ -358,6 +358,8 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
|||||||
fallthrough
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrNonceGap):
|
case xerrors.Is(err, messagepool.ErrNonceGap):
|
||||||
fallthrough
|
fallthrough
|
||||||
|
case xerrors.Is(err, messagepool.ErrGasFeeCapTooLow):
|
||||||
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
||||||
fallthrough
|
fallthrough
|
||||||
case xerrors.Is(err, messagepool.ErrExistingNonce):
|
case xerrors.Is(err, messagepool.ErrExistingNonce):
|
||||||
|
@ -9,7 +9,7 @@ import (
|
|||||||
"github.com/golang/mock/gomock"
|
"github.com/golang/mock/gomock"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
"github.com/ipni/storetheindex/announce/message"
|
"github.com/ipni/go-libipni/announce/message"
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
"github.com/libp2p/go-libp2p/core/peer"
|
||||||
|
@ -2,6 +2,7 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
)
|
)
|
||||||
@ -14,10 +15,13 @@ type BlockMsg struct {
|
|||||||
|
|
||||||
func DecodeBlockMsg(b []byte) (*BlockMsg, error) {
|
func DecodeBlockMsg(b []byte) (*BlockMsg, error) {
|
||||||
var bm BlockMsg
|
var bm BlockMsg
|
||||||
if err := bm.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
data := bytes.NewReader(b)
|
||||||
|
if err := bm.UnmarshalCBOR(data); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if l := data.Len(); l != 0 {
|
||||||
|
return nil, fmt.Errorf("extraneous data in BlockMsg CBOR encoding: got %d unexpected bytes", l)
|
||||||
|
}
|
||||||
return &bm, nil
|
return &bm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
40
chain/types/blockmsg_test.go
Normal file
40
chain/types/blockmsg_test.go
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDecodeBlockMsg(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
b []byte
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []byte
|
||||||
|
want *BlockMsg
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{"decode empty BlockMsg with extra data at the end", []byte{0x83, 0xf6, 0x80, 0x80, 0x20}, nil, true},
|
||||||
|
{"decode valid empty BlockMsg", []byte{0x83, 0xf6, 0x80, 0x80}, new(BlockMsg), false},
|
||||||
|
{"decode invalid cbor", []byte{0x83, 0xf6, 0x80}, nil, true},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
data := tt.data
|
||||||
|
want := tt.want
|
||||||
|
wantErr := tt.wantErr
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := DecodeBlockMsg(data)
|
||||||
|
if wantErr {
|
||||||
|
assert.Errorf(t, err, "DecodeBlockMsg(%x)", data)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
assert.NoErrorf(t, err, "DecodeBlockMsg(%x)", data)
|
||||||
|
assert.Equalf(t, want, got, "DecodeBlockMsg(%x)", data)
|
||||||
|
serialized, err := got.Serialize()
|
||||||
|
assert.NoErrorf(t, err, "DecodeBlockMsg(%x)", data)
|
||||||
|
assert.Equalf(t, serialized, data, "DecodeBlockMsg(%x)", data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@ -238,6 +238,30 @@ func (c *EthCall) UnmarshalJSON(b []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EthSyncingResult struct {
|
||||||
|
DoneSync bool
|
||||||
|
StartingBlock EthUint64
|
||||||
|
CurrentBlock EthUint64
|
||||||
|
HighestBlock EthUint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sr EthSyncingResult) MarshalJSON() ([]byte, error) {
|
||||||
|
if sr.DoneSync {
|
||||||
|
// when done syncing, the json response should be '"result": false'
|
||||||
|
return []byte("false"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// need to do an anonymous struct to avoid infinite recursion
|
||||||
|
return json.Marshal(&struct {
|
||||||
|
StartingBlock EthUint64 `json:"startingblock"`
|
||||||
|
CurrentBlock EthUint64 `json:"currentblock"`
|
||||||
|
HighestBlock EthUint64 `json:"highestblock"`
|
||||||
|
}{
|
||||||
|
StartingBlock: sr.StartingBlock,
|
||||||
|
CurrentBlock: sr.CurrentBlock,
|
||||||
|
HighestBlock: sr.HighestBlock})
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
EthAddressLength = 20
|
EthAddressLength = 20
|
||||||
EthHashLength = 32
|
EthHashLength = 32
|
||||||
@ -548,12 +572,12 @@ func (h EthSubscriptionID) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type EthFilterSpec struct {
|
type EthFilterSpec struct {
|
||||||
// Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first,
|
// Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first,
|
||||||
// "pending" for not yet committed messages.
|
// "pending" for not yet committed messages.
|
||||||
// Optional, default: "latest".
|
// Optional, default: "latest".
|
||||||
FromBlock *string `json:"fromBlock,omitempty"`
|
FromBlock *string `json:"fromBlock,omitempty"`
|
||||||
|
|
||||||
// Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first,
|
// Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first,
|
||||||
// "pending" for not yet committed messages.
|
// "pending" for not yet committed messages.
|
||||||
// Optional, default: "latest".
|
// Optional, default: "latest".
|
||||||
ToBlock *string `json:"toBlock,omitempty"`
|
ToBlock *string `json:"toBlock,omitempty"`
|
||||||
@ -815,3 +839,93 @@ func (e EthFeeHistoryParams) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return json.Marshal([]interface{}{e.BlkCount, e.NewestBlkNum})
|
return json.Marshal([]interface{}{e.BlkCount, e.NewestBlkNum})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type EthBlockNumberOrHash struct {
|
||||||
|
// PredefinedBlock can be one of "earliest", "pending" or "latest". We could merge this
|
||||||
|
// field with BlockNumber if the latter could store negative numbers representing
|
||||||
|
// each predefined value (e.g. -1 for "earliest", -2 for "pending" and -3 for "latest")
|
||||||
|
PredefinedBlock *string `json:"-"`
|
||||||
|
|
||||||
|
BlockNumber *EthUint64 `json:"blockNumber,omitempty"`
|
||||||
|
BlockHash *EthHash `json:"blockHash,omitempty"`
|
||||||
|
RequireCanonical bool `json:"requireCanonical,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEthBlockNumberOrHashFromPredefined(predefined string) EthBlockNumberOrHash {
|
||||||
|
return EthBlockNumberOrHash{
|
||||||
|
PredefinedBlock: &predefined,
|
||||||
|
BlockNumber: nil,
|
||||||
|
BlockHash: nil,
|
||||||
|
RequireCanonical: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEthBlockNumberOrHashFromNumber(number EthUint64) EthBlockNumberOrHash {
|
||||||
|
return EthBlockNumberOrHash{
|
||||||
|
PredefinedBlock: nil,
|
||||||
|
BlockNumber: &number,
|
||||||
|
BlockHash: nil,
|
||||||
|
RequireCanonical: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewEthBlockNumberOrHashFromHexString(str string) (EthBlockNumberOrHash, error) {
|
||||||
|
// check if block param is a number (decimal or hex)
|
||||||
|
var num EthUint64 = 0
|
||||||
|
err := num.UnmarshalJSON([]byte(str))
|
||||||
|
if err != nil {
|
||||||
|
return NewEthBlockNumberOrHashFromNumber(0), err
|
||||||
|
}
|
||||||
|
|
||||||
|
return EthBlockNumberOrHash{
|
||||||
|
PredefinedBlock: nil,
|
||||||
|
BlockNumber: &num,
|
||||||
|
BlockHash: nil,
|
||||||
|
RequireCanonical: false,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e EthBlockNumberOrHash) MarshalJSON() ([]byte, error) {
|
||||||
|
if e.PredefinedBlock != nil {
|
||||||
|
return json.Marshal(*e.PredefinedBlock)
|
||||||
|
}
|
||||||
|
|
||||||
|
type tmpStruct EthBlockNumberOrHash
|
||||||
|
return json.Marshal(tmpStruct(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error {
|
||||||
|
// we first try to unmarshal into a EthBlockNumberOrHash struct to check
|
||||||
|
// if the block param is a block hash or block number (see EIP-1898). We use
|
||||||
|
// a temporary struct to avoid infinite recursion.
|
||||||
|
type tmpStruct EthBlockNumberOrHash
|
||||||
|
var tmp tmpStruct
|
||||||
|
if err := json.Unmarshal(b, &tmp); err == nil {
|
||||||
|
if tmp.BlockNumber != nil && tmp.BlockHash != nil {
|
||||||
|
return errors.New("cannot specify both blockNumber and blockHash")
|
||||||
|
}
|
||||||
|
|
||||||
|
*e = EthBlockNumberOrHash(tmp)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if block param is once of the special strings
|
||||||
|
var str string
|
||||||
|
err := json.Unmarshal(b, &str)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if str == "earliest" || str == "pending" || str == "latest" {
|
||||||
|
e.PredefinedBlock = &str
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if block param is a number (decimal or hex)
|
||||||
|
var num EthUint64
|
||||||
|
if err := num.UnmarshalJSON(b); err == nil {
|
||||||
|
e.BlockNumber = &num
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("invalid block param")
|
||||||
|
}
|
||||||
|
@ -520,10 +520,6 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ret.ExitCode != 0 {
|
|
||||||
return applyRet, fmt.Errorf("implicit message failed with exit code: %d and error: %w", ret.ExitCode, applyRet.ActorErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
return applyRet, nil
|
return applyRet, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,10 +10,10 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ipfs/go-blockservice"
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
carv2 "github.com/ipld/go-car/v2"
|
carv2 "github.com/ipld/go-car/v2"
|
||||||
"github.com/ipld/go-car/v2/blockstore"
|
"github.com/ipld/go-car/v2/blockstore"
|
||||||
"github.com/ipld/go-ipld-prime"
|
"github.com/ipld/go-ipld-prime"
|
||||||
|
@ -130,7 +130,7 @@ var EvmCallSimulateCmd = &cli.Command{
|
|||||||
From: &fromEthAddr,
|
From: &fromEthAddr,
|
||||||
To: &toEthAddr,
|
To: &toEthAddr,
|
||||||
Data: params,
|
Data: params,
|
||||||
}, "")
|
}, ethtypes.NewEthBlockNumberOrHashFromPredefined("latest"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Eth call fails, return val: ", res)
|
fmt.Println("Eth call fails, return val: ", res)
|
||||||
return err
|
return err
|
||||||
@ -518,7 +518,7 @@ var EvmGetBytecode = &cli.Command{
|
|||||||
defer closer()
|
defer closer()
|
||||||
ctx := ReqContext(cctx)
|
ctx := ReqContext(cctx)
|
||||||
|
|
||||||
code, err := api.EthGetCode(ctx, contractAddr, "latest")
|
code, err := api.EthGetCode(ctx, contractAddr, ethtypes.NewEthBlockNumberOrHashFromPredefined("latest"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
16
cli/info.go
16
cli/info.go
@ -20,6 +20,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/api/v1api"
|
"github.com/filecoin-project/lotus/api/v1api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/journal/alerting"
|
||||||
)
|
)
|
||||||
|
|
||||||
var infoCmd = &cli.Command{
|
var infoCmd = &cli.Command{
|
||||||
@ -62,6 +63,21 @@ func infoCmdAct(cctx *cli.Context) error {
|
|||||||
fmt.Printf(" [epoch %s]\n", color.MagentaString(("%d"), status.SyncStatus.Epoch))
|
fmt.Printf(" [epoch %s]\n", color.MagentaString(("%d"), status.SyncStatus.Epoch))
|
||||||
fmt.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, status.PeerStatus.PeersToPublishBlocks)
|
fmt.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, status.PeerStatus.PeersToPublishBlocks)
|
||||||
|
|
||||||
|
alerts, err := fullapi.LogAlerts(ctx)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("ERROR: getting alerts: %s\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
activeAlerts := make([]alerting.Alert, 0)
|
||||||
|
for _, alert := range alerts {
|
||||||
|
if alert.Active {
|
||||||
|
activeAlerts = append(activeAlerts, alert)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(activeAlerts) > 0 {
|
||||||
|
fmt.Printf("%s (check %s)\n", color.RedString("⚠ %d Active alerts", len(activeAlerts)), color.YellowString("lotus log alerts"))
|
||||||
|
}
|
||||||
|
|
||||||
//Chain health calculated as percentage: amount of blocks in last finality / very healthy amount of blocks in a finality (900 epochs * 5 blocks per tipset)
|
//Chain health calculated as percentage: amount of blocks in last finality / very healthy amount of blocks in a finality (900 epochs * 5 blocks per tipset)
|
||||||
health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5))
|
health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5))
|
||||||
switch {
|
switch {
|
||||||
|
14
cli/state.go
14
cli/state.go
@ -1065,12 +1065,19 @@ var StateComputeStateCmd = &cli.Command{
|
|||||||
|
|
||||||
ctx := ReqContext(cctx)
|
ctx := ReqContext(cctx)
|
||||||
|
|
||||||
ts, err := LoadTipSet(ctx, cctx, api)
|
h := abi.ChainEpoch(cctx.Uint64("vm-height"))
|
||||||
|
var ts *types.TipSet
|
||||||
|
if tss := cctx.String("tipset"); tss != "" {
|
||||||
|
ts, err = ParseTipSetRef(ctx, api, tss)
|
||||||
|
} else if h > 0 {
|
||||||
|
ts, err = api.ChainGetTipSetByHeight(ctx, h, types.EmptyTSK)
|
||||||
|
} else {
|
||||||
|
ts, err = api.ChainHead(ctx)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
h := abi.ChainEpoch(cctx.Uint64("vm-height"))
|
|
||||||
if h == 0 {
|
if h == 0 {
|
||||||
h = ts.Height()
|
h = ts.Height()
|
||||||
}
|
}
|
||||||
@ -1528,6 +1535,9 @@ func printMsg(ctx context.Context, api v0api.FullNode, msg cid.Cid, mw *lapi.Msg
|
|||||||
if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
|
if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if mw.Receipt.EventsRoot != nil {
|
||||||
|
fmt.Printf("Events Root: %s\n", mw.Receipt.EventsRoot)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -10,6 +10,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
|
"golang.org/x/term"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
@ -327,13 +328,21 @@ var walletImport = &cli.Command{
|
|||||||
|
|
||||||
var inpdata []byte
|
var inpdata []byte
|
||||||
if !cctx.Args().Present() || cctx.Args().First() == "-" {
|
if !cctx.Args().Present() || cctx.Args().First() == "-" {
|
||||||
reader := bufio.NewReader(os.Stdin)
|
if term.IsTerminal(int(os.Stdin.Fd())) {
|
||||||
fmt.Print("Enter private key: ")
|
fmt.Print("Enter private key(not display in the terminal): ")
|
||||||
indata, err := reader.ReadBytes('\n')
|
inpdata, err = term.ReadPassword(int(os.Stdin.Fd()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
} else {
|
||||||
|
reader := bufio.NewReader(os.Stdin)
|
||||||
|
indata, err := reader.ReadBytes('\n')
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
inpdata = indata
|
||||||
}
|
}
|
||||||
inpdata = indata
|
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
fdata, err := os.ReadFile(cctx.Args().First())
|
fdata, err := os.ReadFile(cctx.Args().First())
|
||||||
|
@ -98,14 +98,16 @@ func main() {
|
|||||||
log.Info("Starting lotus-bench")
|
log.Info("Starting lotus-bench")
|
||||||
|
|
||||||
app := &cli.App{
|
app := &cli.App{
|
||||||
Name: "lotus-bench",
|
Name: "lotus-bench",
|
||||||
Usage: "Benchmark performance of lotus on your hardware",
|
Usage: "Benchmark performance of lotus on your hardware",
|
||||||
Version: build.UserVersion(),
|
Version: build.UserVersion(),
|
||||||
|
DisableSliceFlagSeparator: true,
|
||||||
Commands: []*cli.Command{
|
Commands: []*cli.Command{
|
||||||
proveCmd,
|
proveCmd,
|
||||||
sealBenchCmd,
|
sealBenchCmd,
|
||||||
simpleCmd,
|
simpleCmd,
|
||||||
importBenchCmd,
|
importBenchCmd,
|
||||||
|
rpcCmd,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
576
cmd/lotus-bench/rpc.go
Normal file
576
cmd/lotus-bench/rpc.go
Normal file
@ -0,0 +1,576 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"text/tabwriter"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var rpcCmd = &cli.Command{
|
||||||
|
Name: "rpc",
|
||||||
|
Usage: "Runs a concurrent stress test on one or more rpc methods and prints the performance metrics including latency distribution and histogram",
|
||||||
|
Description: `This benchmark is designed to stress test the rpc methods of a lotus node so that we can simulate real world usage and measure the performance of rpc methods on the node.
|
||||||
|
|
||||||
|
This benchmark has the following features:
|
||||||
|
* Can query each method both sequentially and concurrently
|
||||||
|
* Supports rate limiting
|
||||||
|
* Can query multiple different endpoints at once (supporting different concurrency level and rate limiting for each method)
|
||||||
|
* Gives a nice reporting summary of the stress testing of each method (including latency distribution, histogram and more)
|
||||||
|
* Easy to use
|
||||||
|
|
||||||
|
To use this benchmark you must specify the rpc methods you want to test using the --method options, the format of it is:
|
||||||
|
|
||||||
|
--method=NAME[:CONCURRENCY][:QPS][:PARAMS] where only NAME is required.
|
||||||
|
|
||||||
|
Here are some real examples:
|
||||||
|
lotus-bench rpc --method='eth_chainId' // run eth_chainId with default concurrency and qps
|
||||||
|
lotus-bench rpc --method='eth_chainId:3' // override concurrency to 3
|
||||||
|
lotus-bench rpc --method='eth_chainId::100' // override to 100 qps while using default concurrency
|
||||||
|
lotus-bench rpc --method='eth_chainId:3:100' // run using 3 workers but limit to 100 qps
|
||||||
|
lotus-bench rpc --method='eth_getTransactionCount:::["0xd4c70007F3F502f212c7e6794b94C06F36173B36", "latest"]' // run using optional params while using default concurrency and qps
|
||||||
|
lotus-bench rpc --method='eth_chainId' --method='eth_getTransactionCount:10:0:["0xd4c70007F3F502f212c7e6794b94C06F36173B36", "latest"]' // run multiple methods at once
|
||||||
|
|
||||||
|
NOTE: The last two examples will not work until we upgrade urfave dependency (tracked in https://github.com/urfave/cli/issues/1734)`,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "endpoint",
|
||||||
|
Value: "http://127.0.0.1:1234/rpc/v1",
|
||||||
|
Usage: "The rpc endpoint to benchmark",
|
||||||
|
},
|
||||||
|
&cli.DurationFlag{
|
||||||
|
Name: "duration",
|
||||||
|
Value: 60 * time.Second,
|
||||||
|
Usage: "Duration of benchmark in seconds",
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "concurrency",
|
||||||
|
Value: 10,
|
||||||
|
Usage: "How many workers should be used per rpc method (can be overridden per method)",
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "qps",
|
||||||
|
Value: 0,
|
||||||
|
Usage: "How many requests per second should be sent per rpc method (can be overridden per method), a value of 0 means no limit",
|
||||||
|
},
|
||||||
|
&cli.StringSliceFlag{
|
||||||
|
Name: "method",
|
||||||
|
Usage: `Method to benchmark, you can specify multiple methods by repeating this flag. You can also specify method specific options to set the concurrency and qps for each method (see usage).
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
&cli.DurationFlag{
|
||||||
|
Name: "watch",
|
||||||
|
Value: 0 * time.Second,
|
||||||
|
Usage: "If >0 then generates reports every N seconds (only supports linux/unix)",
|
||||||
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "print-response",
|
||||||
|
Value: false,
|
||||||
|
Usage: "print the response of each request",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
if len(cctx.StringSlice("method")) == 0 {
|
||||||
|
return errors.New("you must specify and least one method to benchmark")
|
||||||
|
}
|
||||||
|
|
||||||
|
var rpcMethods []*RPCMethod
|
||||||
|
for _, str := range cctx.StringSlice("method") {
|
||||||
|
entries := strings.SplitN(str, ":", 4)
|
||||||
|
if len(entries) == 0 {
|
||||||
|
return errors.New("invalid method format")
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if concurrency was specified
|
||||||
|
concurrency := cctx.Int("concurrency")
|
||||||
|
if len(entries) > 1 {
|
||||||
|
if len(entries[1]) > 0 {
|
||||||
|
var err error
|
||||||
|
concurrency, err = strconv.Atoi(entries[1])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not parse concurrency value from method %s: %v", entries[0], err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if qps was specified
|
||||||
|
qps := cctx.Int("qps")
|
||||||
|
if len(entries) > 2 {
|
||||||
|
if len(entries[2]) > 0 {
|
||||||
|
var err error
|
||||||
|
qps, err = strconv.Atoi(entries[2])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not parse qps value from method %s: %v", entries[0], err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if params was specified
|
||||||
|
params := "[]"
|
||||||
|
if len(entries) > 3 {
|
||||||
|
params = entries[3]
|
||||||
|
}
|
||||||
|
|
||||||
|
rpcMethods = append(rpcMethods, &RPCMethod{
|
||||||
|
w: os.Stdout,
|
||||||
|
uri: cctx.String("endpoint"),
|
||||||
|
method: entries[0],
|
||||||
|
concurrency: concurrency,
|
||||||
|
qps: qps,
|
||||||
|
params: params,
|
||||||
|
printResp: cctx.Bool("print-response"),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// terminate early on ctrl+c
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt)
|
||||||
|
go func() {
|
||||||
|
<-c
|
||||||
|
fmt.Println("Received interrupt, stopping...")
|
||||||
|
for _, method := range rpcMethods {
|
||||||
|
method.Stop()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// stop all threads after duration
|
||||||
|
go func() {
|
||||||
|
time.Sleep(cctx.Duration("duration"))
|
||||||
|
for _, e := range rpcMethods {
|
||||||
|
e.Stop()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// start all threads
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(len(rpcMethods))
|
||||||
|
|
||||||
|
for _, e := range rpcMethods {
|
||||||
|
go func(e *RPCMethod) {
|
||||||
|
defer wg.Done()
|
||||||
|
err := e.Run()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("error running rpc method: %v\n", err)
|
||||||
|
}
|
||||||
|
}(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// if watch is set then print a report every N seconds
|
||||||
|
var progressCh chan struct{}
|
||||||
|
if cctx.Duration("watch") > 0 {
|
||||||
|
progressCh = make(chan struct{}, 1)
|
||||||
|
go func(progressCh chan struct{}) {
|
||||||
|
ticker := time.NewTicker(cctx.Duration("watch"))
|
||||||
|
for {
|
||||||
|
clearAndPrintReport := func() {
|
||||||
|
// clear the screen move the curser to the top left
|
||||||
|
fmt.Print("\033[2J")
|
||||||
|
fmt.Printf("\033[%d;%dH", 1, 1)
|
||||||
|
for i, e := range rpcMethods {
|
||||||
|
e.Report()
|
||||||
|
if i < len(rpcMethods)-1 {
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
clearAndPrintReport()
|
||||||
|
case <-progressCh:
|
||||||
|
clearAndPrintReport()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(progressCh)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if progressCh != nil {
|
||||||
|
// wait for the watch go routine to return
|
||||||
|
progressCh <- struct{}{}
|
||||||
|
|
||||||
|
// no need to print the report again
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// print the report for each endpoint
|
||||||
|
for i, e := range rpcMethods {
|
||||||
|
e.Report()
|
||||||
|
if i < len(rpcMethods)-1 {
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPCMethod handles the benchmarking of a single endpoint method.
type RPCMethod struct {
	// w is where the final report for this method is written
	w io.Writer
	// the endpoint uri
	uri string
	// the rpc method we want to benchmark
	method string
	// the number of concurrent requests to make to this endpoint
	concurrency int
	// if >0 then limit to qps is the max number of requests per second to make to this endpoint (0 = no limit)
	qps int
	// many endpoints require specific parameters to be passed
	params string
	// whether or not to print the response of each request (useful for debugging)
	printResp bool
	// instruct the worker go routines to stop
	stopCh chan struct{}
	// when the endpoint benchmarking started
	start time.Time
	// results channel is used by the workers to send results to the reporter
	results chan *result
	// reporter handles reading the results from workers and printing the report statistics
	reporter *Reporter
}
|
||||||
|
|
||||||
|
// result is the result of a single rpc method request.
type result struct {
	// err is non-nil when the request failed at the transport layer or
	// when the JSON-RPC response body carried an error object
	err error
	// statusCode is the HTTP status code of the response; nil when the
	// request never produced an HTTP response (transport error)
	statusCode *int
	// duration is the end-to-end time taken by the request
	duration time.Duration
}
|
||||||
|
|
||||||
|
func (rpc *RPCMethod) Run() error {
|
||||||
|
client := &http.Client{
|
||||||
|
Timeout: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(rpc.concurrency)
|
||||||
|
|
||||||
|
rpc.results = make(chan *result, rpc.concurrency*1_000)
|
||||||
|
rpc.stopCh = make(chan struct{}, rpc.concurrency)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
rpc.reporter = NewReporter(rpc.results, rpc.w)
|
||||||
|
rpc.reporter.Run()
|
||||||
|
}()
|
||||||
|
|
||||||
|
rpc.start = time.Now()
|
||||||
|
|
||||||
|
// throttle the number of requests per second
|
||||||
|
var qpsTicker *time.Ticker
|
||||||
|
if rpc.qps > 0 {
|
||||||
|
qpsTicker = time.NewTicker(time.Second / time.Duration(rpc.qps))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < rpc.concurrency; i++ {
|
||||||
|
go func() {
|
||||||
|
rpc.startWorker(client, qpsTicker)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// close the results channel so reporter will stop
|
||||||
|
close(rpc.results)
|
||||||
|
|
||||||
|
// wait until the reporter is done
|
||||||
|
<-rpc.reporter.doneCh
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// startWorker loops issuing requests against the endpoint until a stop
// token is received on rpc.stopCh. Each iteration optionally waits on the
// qps ticker, performs one JSON-RPC request, classifies the outcome
// (transport error, HTTP status code, or a JSON-RPC error object found in
// the response body) and sends a *result to rpc.results for aggregation.
//
// Request-building and body-decoding failures are treated as fatal and
// terminate the whole process via log.Fatalln.
func (rpc *RPCMethod) startWorker(client *http.Client, qpsTicker *time.Ticker) {
	for {
		// check if we should stop; the default case keeps this
		// non-blocking so requests continue while no signal is pending
		select {
		case <-rpc.stopCh:
			return
		default:
		}

		// wait for the next tick if we are rate limiting this endpoint
		if qpsTicker != nil {
			<-qpsTicker.C
		}

		req, err := rpc.buildRequest()
		if err != nil {
			log.Fatalln(err)
		}

		start := time.Now()

		// stays nil when the request never produced an HTTP response
		var statusCode *int

		// send request the endpoint
		resp, err := client.Do(req)
		if err != nil {
			err = fmt.Errorf("HTTP error: %s", err.Error())
		} else {
			statusCode = &resp.StatusCode

			// there was not a HTTP error but we need to still check the json response for errors
			var data []byte
			data, err = io.ReadAll(resp.Body)
			if err != nil {
				log.Fatalln(err)
			}

			// we are only interested if it has the error field in the response
			type respData struct {
				Error struct {
					Code    int    `json:"code"`
					Message string `json:"message"`
				} `json:"error"`
			}

			// unmarshal the response into a struct so we can check for errors
			var d respData
			err = json.Unmarshal(data, &d)
			if err != nil {
				log.Fatalln(err)
			}

			// if the response has an error json message then it should be considered an error just like any http error
			if len(d.Error.Message) > 0 {
				// truncate the error message if it is too long
				if len(d.Error.Message) > 1000 {
					d.Error.Message = d.Error.Message[:1000] + "..."
				}
				// remove newlines from the error message so we don't screw up the report
				d.Error.Message = strings.ReplaceAll(d.Error.Message, "\n", "")

				err = fmt.Errorf("JSON error: code:%d, message:%s", d.Error.Code, d.Error.Message)
			}

			if rpc.printResp {
				fmt.Printf("[%s] %s", rpc.method, string(data))
			}

			resp.Body.Close() //nolint:errcheck
		}

		rpc.results <- &result{
			statusCode: statusCode,
			err:        err,
			duration:   time.Since(start),
		}
	}
}
|
||||||
|
|
||||||
|
func (rpc *RPCMethod) buildRequest() (*http.Request, error) {
|
||||||
|
jreq, err := json.Marshal(struct {
|
||||||
|
Jsonrpc string `json:"jsonrpc"`
|
||||||
|
ID int `json:"id"`
|
||||||
|
Method string `json:"method"`
|
||||||
|
Params json.RawMessage `json:"params"`
|
||||||
|
}{
|
||||||
|
Jsonrpc: "2.0",
|
||||||
|
Method: rpc.method,
|
||||||
|
Params: json.RawMessage(rpc.params),
|
||||||
|
ID: 0,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("POST", rpc.uri, bytes.NewReader(jreq))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Accept", "application/json")
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rpc *RPCMethod) Stop() {
|
||||||
|
for i := 0; i < rpc.concurrency; i++ {
|
||||||
|
rpc.stopCh <- struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rpc *RPCMethod) Report() {
|
||||||
|
total := time.Since(rpc.start)
|
||||||
|
fmt.Fprintf(rpc.w, "[%s]:\n", rpc.method)
|
||||||
|
fmt.Fprintf(rpc.w, "- Options:\n")
|
||||||
|
fmt.Fprintf(rpc.w, " - concurrency: %d\n", rpc.concurrency)
|
||||||
|
fmt.Fprintf(rpc.w, " - params: %s\n", rpc.params)
|
||||||
|
fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps)
|
||||||
|
rpc.reporter.Print(total, rpc.w)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reporter reads the results from the workers through the results channel and aggregates the results.
type Reporter struct {
	// write the report to this writer
	w io.Writer
	// the reporter reads the results from this channel
	results chan *result
	// doneCh is used to signal that the reporter has finished reading the results (channel has closed)
	doneCh chan bool

	// lock protects the following fields during critical sections (if --watch was specified)
	lock sync.Mutex
	// the latencies of all requests, in milliseconds
	latencies []int64
	// the number of requests that returned each status code
	statusCodes map[int]int
	// the number of errors that occurred, keyed by error message
	// (successful requests are counted under the "nil" key)
	errors map[string]int
}
|
||||||
|
|
||||||
|
func NewReporter(results chan *result, w io.Writer) *Reporter {
|
||||||
|
return &Reporter{
|
||||||
|
w: w,
|
||||||
|
results: results,
|
||||||
|
doneCh: make(chan bool, 1),
|
||||||
|
statusCodes: make(map[int]int),
|
||||||
|
errors: make(map[string]int),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reporter) Run() {
|
||||||
|
for res := range r.results {
|
||||||
|
r.lock.Lock()
|
||||||
|
|
||||||
|
r.latencies = append(r.latencies, res.duration.Milliseconds())
|
||||||
|
|
||||||
|
if res.statusCode != nil {
|
||||||
|
r.statusCodes[*res.statusCode]++
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.err != nil {
|
||||||
|
if len(r.errors) < 1_000_000 {
|
||||||
|
r.errors[res.err.Error()]++
|
||||||
|
} else {
|
||||||
|
// we don't want to store too many errors in memory
|
||||||
|
r.errors["hidden"]++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.errors["nil"]++
|
||||||
|
}
|
||||||
|
|
||||||
|
r.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
r.doneCh <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Reporter) Print(elapsed time.Duration, w io.Writer) {
|
||||||
|
r.lock.Lock()
|
||||||
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
|
nrReq := int64(len(r.latencies))
|
||||||
|
if nrReq == 0 {
|
||||||
|
fmt.Println("No requests were made")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// we need to sort the latencies slice to calculate the percentiles
|
||||||
|
sort.Slice(r.latencies, func(i, j int) bool {
|
||||||
|
return r.latencies[i] < r.latencies[j]
|
||||||
|
})
|
||||||
|
|
||||||
|
var totalLatency int64 = 0
|
||||||
|
for _, latency := range r.latencies {
|
||||||
|
totalLatency += latency
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "- Total Requests: %d\n", nrReq)
|
||||||
|
fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds())
|
||||||
|
fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds())
|
||||||
|
fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq)
|
||||||
|
fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2])
|
||||||
|
fmt.Fprintf(w, "- Latency distribution:\n")
|
||||||
|
percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999}
|
||||||
|
for _, p := range percentiles {
|
||||||
|
idx := int64(p * float64(nrReq))
|
||||||
|
fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx])
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a simple histogram with 10 buckets spanning the range of latency
|
||||||
|
// into equal ranges
|
||||||
|
//
|
||||||
|
nrBucket := 10
|
||||||
|
buckets := make([]Bucket, nrBucket)
|
||||||
|
latencyRange := r.latencies[len(r.latencies)-1]
|
||||||
|
bucketRange := latencyRange / int64(nrBucket)
|
||||||
|
|
||||||
|
// mark the end of each bucket
|
||||||
|
for i := 0; i < nrBucket; i++ {
|
||||||
|
buckets[i].start = int64(i) * bucketRange
|
||||||
|
buckets[i].end = buckets[i].start + bucketRange
|
||||||
|
// extend the last bucked by any remaning range caused by the integer division
|
||||||
|
if i == nrBucket-1 {
|
||||||
|
buckets[i].end = latencyRange
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// count the number of requests in each bucket
|
||||||
|
currBucket := 0
|
||||||
|
for i := 0; i < len(r.latencies); {
|
||||||
|
if r.latencies[i] <= buckets[currBucket].end {
|
||||||
|
buckets[currBucket].cnt++
|
||||||
|
i++
|
||||||
|
} else {
|
||||||
|
currBucket++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// print the histogram using a tabwriter which will align the columns nicely
|
||||||
|
fmt.Fprintf(w, "- Histogram:\n")
|
||||||
|
const padding = 2
|
||||||
|
tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug)
|
||||||
|
for i := 0; i < nrBucket; i++ {
|
||||||
|
ratio := float64(buckets[i].cnt) / float64(nrReq)
|
||||||
|
bars := strings.Repeat("#", int(ratio*100))
|
||||||
|
fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100))
|
||||||
|
}
|
||||||
|
tabWriter.Flush() //nolint:errcheck
|
||||||
|
|
||||||
|
fmt.Fprintf(w, "- Status codes:\n")
|
||||||
|
for code, cnt := range r.statusCodes {
|
||||||
|
fmt.Fprintf(w, " [%d]: %d\n", code, cnt)
|
||||||
|
}
|
||||||
|
|
||||||
|
// print the 10 most occurring errors (in case error values are not unique)
|
||||||
|
//
|
||||||
|
type kv struct {
|
||||||
|
err string
|
||||||
|
cnt int
|
||||||
|
}
|
||||||
|
var sortedErrors []kv
|
||||||
|
for err, cnt := range r.errors {
|
||||||
|
sortedErrors = append(sortedErrors, kv{err, cnt})
|
||||||
|
}
|
||||||
|
sort.Slice(sortedErrors, func(i, j int) bool {
|
||||||
|
return sortedErrors[i].cnt > sortedErrors[j].cnt
|
||||||
|
})
|
||||||
|
fmt.Fprintf(w, "- Errors (top 10):\n")
|
||||||
|
for i, se := range sortedErrors {
|
||||||
|
if i > 10 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bucket is a single bar of the latency histogram: the half-open latency
// range it covers (in milliseconds) and how many requests fell inside it.
type Bucket struct {
	// the start value of the bucket
	start int64
	// the end value of the bucket
	end int64
	// how many entries are in the bucket
	cnt int
}
|
@ -7,6 +7,7 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
rice "github.com/GeertJohan/go.rice"
|
rice "github.com/GeertJohan/go.rice"
|
||||||
@ -15,10 +16,14 @@ import (
|
|||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
|
verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api/v0api"
|
"github.com/filecoin-project/lotus/api/v0api"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -70,6 +75,11 @@ var runCmd = &cli.Command{
|
|||||||
EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"},
|
EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"},
|
||||||
Value: "50",
|
Value: "50",
|
||||||
},
|
},
|
||||||
|
&cli.Uint64Flag{
|
||||||
|
Name: "data-cap",
|
||||||
|
EnvVars: []string{"LOTUS_DATACAP_AMOUNT"},
|
||||||
|
Value: verifregtypes9.MinVerifiedDealSize.Uint64(),
|
||||||
|
},
|
||||||
&cli.Float64Flag{
|
&cli.Float64Flag{
|
||||||
Name: "captcha-threshold",
|
Name: "captcha-threshold",
|
||||||
Value: 0.5,
|
Value: 0.5,
|
||||||
@ -108,6 +118,7 @@ var runCmd = &cli.Command{
|
|||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
api: nodeApi,
|
api: nodeApi,
|
||||||
from: from,
|
from: from,
|
||||||
|
allowance: types.NewInt(cctx.Uint64("data-cap")),
|
||||||
sendPerRequest: sendPerRequest,
|
sendPerRequest: sendPerRequest,
|
||||||
limiter: NewLimiter(LimiterConfig{
|
limiter: NewLimiter(LimiterConfig{
|
||||||
TotalRate: 500 * time.Millisecond,
|
TotalRate: 500 * time.Millisecond,
|
||||||
@ -124,6 +135,8 @@ var runCmd = &cli.Command{
|
|||||||
http.Handle("/", http.FileServer(box.HTTPBox()))
|
http.Handle("/", http.FileServer(box.HTTPBox()))
|
||||||
http.HandleFunc("/funds.html", prepFundsHtml(box))
|
http.HandleFunc("/funds.html", prepFundsHtml(box))
|
||||||
http.Handle("/send", h)
|
http.Handle("/send", h)
|
||||||
|
http.HandleFunc("/datacap.html", prepDataCapHtml(box))
|
||||||
|
http.Handle("/datacap", h)
|
||||||
fmt.Printf("Open http://%s\n", cctx.String("front"))
|
fmt.Printf("Open http://%s\n", cctx.String("front"))
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
@ -156,12 +169,24 @@ func prepFundsHtml(box *rice.Box) http.HandlerFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func prepDataCapHtml(box *rice.Box) http.HandlerFunc {
|
||||||
|
tmpl := template.Must(template.New("datacaps").Parse(box.MustString("datacap.html")))
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := tmpl.Execute(w, os.Getenv("RECAPTCHA_SITE_KEY"))
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadGateway)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type handler struct {
|
type handler struct {
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
api v0api.FullNode
|
api v0api.FullNode
|
||||||
|
|
||||||
from address.Address
|
from address.Address
|
||||||
sendPerRequest types.FIL
|
sendPerRequest types.FIL
|
||||||
|
allowance types.BigInt
|
||||||
|
|
||||||
limiter *Limiter
|
limiter *Limiter
|
||||||
recapThreshold float64
|
recapThreshold float64
|
||||||
@ -187,24 +212,41 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
http.Error(w, err.Error(), http.StatusBadGateway)
|
http.Error(w, err.Error(), http.StatusBadGateway)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if !capResp.Success || capResp.Score < h.recapThreshold {
|
if !capResp.Success || capResp.Score < h.recapThreshold {
|
||||||
log.Infow("spam", "capResp", capResp)
|
log.Infow("spam", "capResp", capResp)
|
||||||
http.Error(w, "spam protection", http.StatusUnprocessableEntity)
|
http.Error(w, "spam protection", http.StatusUnprocessableEntity)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
to, err := address.NewFromString(r.FormValue("address"))
|
addressInput := r.FormValue("address")
|
||||||
if err != nil {
|
|
||||||
|
var filecoinAddress address.Address
|
||||||
|
var decodeError error
|
||||||
|
|
||||||
|
if strings.HasPrefix(addressInput, "0x") {
|
||||||
|
ethAddress, err := ethtypes.ParseEthAddress(addressInput)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
filecoinAddress, decodeError = ethAddress.ToFilecoinAddress()
|
||||||
|
} else {
|
||||||
|
filecoinAddress, decodeError = address.NewFromString(addressInput)
|
||||||
|
}
|
||||||
|
|
||||||
|
if decodeError != nil {
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if to == address.Undef {
|
if filecoinAddress == address.Undef {
|
||||||
http.Error(w, "empty address", http.StatusBadRequest)
|
http.Error(w, "empty address", http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Limit based on wallet address
|
// Limit based on wallet address
|
||||||
limiter := h.limiter.GetWalletLimiter(to.String())
|
limiter := h.limiter.GetWalletLimiter(filecoinAddress.String())
|
||||||
if !limiter.Allow() {
|
if !limiter.Allow() {
|
||||||
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": wallet limit", http.StatusTooManyRequests)
|
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": wallet limit", http.StatusTooManyRequests)
|
||||||
return
|
return
|
||||||
@ -227,11 +269,37 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
smsg, err := h.api.MpoolPushMessage(h.ctx, &types.Message{
|
var smsg *types.SignedMessage
|
||||||
Value: types.BigInt(h.sendPerRequest),
|
if r.RequestURI == "/send" {
|
||||||
From: h.from,
|
smsg, err = h.api.MpoolPushMessage(
|
||||||
To: to,
|
h.ctx, &types.Message{
|
||||||
}, nil)
|
Value: types.BigInt(h.sendPerRequest),
|
||||||
|
From: h.from,
|
||||||
|
To: filecoinAddress,
|
||||||
|
}, nil)
|
||||||
|
} else if r.RequestURI == "/datacap" {
|
||||||
|
var params []byte
|
||||||
|
params, err = actors.SerializeParams(
|
||||||
|
&verifregtypes9.AddVerifiedClientParams{
|
||||||
|
Address: filecoinAddress,
|
||||||
|
Allowance: h.allowance,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
smsg, err = h.api.MpoolPushMessage(
|
||||||
|
h.ctx, &types.Message{
|
||||||
|
Params: params,
|
||||||
|
From: h.from,
|
||||||
|
To: verifreg.Address,
|
||||||
|
Method: verifreg.Methods.AddVerifiedClient,
|
||||||
|
}, nil)
|
||||||
|
} else {
|
||||||
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
|
41
cmd/lotus-fountain/site/datacap.html
Normal file
41
cmd/lotus-fountain/site/datacap.html
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>Grant DataCap - Lotus Fountain</title>
|
||||||
|
<link rel="stylesheet" type="text/css" href="main.css">
|
||||||
|
<script src="https://www.google.com/recaptcha/api.js"></script>
|
||||||
|
<script>
|
||||||
|
function onSubmit(token) {
|
||||||
|
document.getElementById("datacap-form").submit();
|
||||||
|
}
|
||||||
|
</script>
|
||||||
|
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="Index">
|
||||||
|
<div class="Index-nodes">
|
||||||
|
<div class="Index-node">
|
||||||
|
<h3>Grant datacap</h3>
|
||||||
|
<p>Please input your address to receive a data cap on the Calibration Testnet.</p>
|
||||||
|
</div>
|
||||||
|
<div class="Index-node">
|
||||||
|
<form action='/datacap' method='post' id='datacap-form'>
|
||||||
|
<span>Enter destination address:</span>
|
||||||
|
<input type='text' name='address' style="width: 300px" placeholder="t0/1/2/3/4 or 0xETH">
|
||||||
|
<button class="g-recaptcha"
|
||||||
|
data-sitekey="{{ . }}"
|
||||||
|
data-callback='onSubmit'
|
||||||
|
data-action='submit'>Grant Datacap</button>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="Index-footer">
|
||||||
|
<div>
|
||||||
|
<a href="index.html">[Back]</a>
|
||||||
|
<span style="float: right">Not dispensing real Filecoin tokens</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</html>
|
@ -15,12 +15,13 @@
|
|||||||
<div class="Index">
|
<div class="Index">
|
||||||
<div class="Index-nodes">
|
<div class="Index-nodes">
|
||||||
<div class="Index-node">
|
<div class="Index-node">
|
||||||
[SENDING FUNDS]
|
<h3>Send funds</h3>
|
||||||
|
<p>Please input your address to receive test FIL (tFIL) on the Calibration Testnet. This faucet dispenses 100 tFIL.</p>
|
||||||
</div>
|
</div>
|
||||||
<div class="Index-node">
|
<div class="Index-node">
|
||||||
<form action='/send' method='post' id='funds-form'>
|
<form action='/send' method='post' id='funds-form'>
|
||||||
<span>Enter destination address:</span>
|
<span>Enter destination address:</span>
|
||||||
<input type='text' name='address' style="width: 300px">
|
<input type='text' name='address' style="width: 300px" placeholder="Enter t0/1/2/3/4 or 0xETH">
|
||||||
<button class="g-recaptcha"
|
<button class="g-recaptcha"
|
||||||
data-sitekey="{{ . }}"
|
data-sitekey="{{ . }}"
|
||||||
data-callback='onSubmit'
|
data-callback='onSubmit'
|
||||||
|
@ -8,10 +8,16 @@
|
|||||||
<div class="Index">
|
<div class="Index">
|
||||||
<div class="Index-nodes">
|
<div class="Index-nodes">
|
||||||
<div class="Index-node">
|
<div class="Index-node">
|
||||||
[LOTUS DEVNET FAUCET]
|
LOTUS DEVNET FAUCET
|
||||||
</div>
|
</div>
|
||||||
<div class="Index-node">
|
<div class="Index-node">
|
||||||
<a href="funds.html">[Send Funds]</a>
|
<a href="funds.html">Send Funds</a>
|
||||||
|
</div>
|
||||||
|
<div class="Index-node">
|
||||||
|
LOTUS DEVNET GRANT DATACAP
|
||||||
|
</div>
|
||||||
|
<div class="Index-node">
|
||||||
|
<a href="datacap.html">Grant DataCap</a>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="Index-footer">
|
<div class="Index-footer">
|
||||||
|
@ -1,7 +1,6 @@
|
|||||||
body {
|
body {
|
||||||
font-family: 'monospace';
|
font-family: 'Helvetica Neue', sans-serif;
|
||||||
background: #1f1f1f;
|
background-color: #f0f0f0;
|
||||||
color: #f0f0f0;
|
|
||||||
padding: 0;
|
padding: 0;
|
||||||
margin: 0;
|
margin: 0;
|
||||||
}
|
}
|
||||||
@ -9,21 +8,22 @@ body {
|
|||||||
.Index {
|
.Index {
|
||||||
width: 100vw;
|
width: 100vw;
|
||||||
height: 100vh;
|
height: 100vh;
|
||||||
background: #1a1a1a;
|
background-color: #f0f0f0;
|
||||||
color: #f0f0f0;
|
color: #333;
|
||||||
font-family: monospace;
|
font-family: 'Helvetica Neue', sans-serif;
|
||||||
|
|
||||||
display: grid;
|
display: grid;
|
||||||
grid-template-columns: auto 40vw auto;
|
grid-template-columns: auto 40vw auto;
|
||||||
grid-template-rows: auto auto auto 3em;
|
grid-template-rows: auto auto auto 3em;
|
||||||
grid-template-areas:
|
grid-template-areas:
|
||||||
". . . ."
|
". . . ."
|
||||||
". main main ."
|
". main main ."
|
||||||
". . . ."
|
". . . ."
|
||||||
"footer footer footer footer";
|
"footer footer footer footer";
|
||||||
}
|
}
|
||||||
|
|
||||||
.Index-footer {
|
.Index-footer {
|
||||||
background: #2a2a2a;
|
background-color: #333;
|
||||||
grid-area: footer;
|
grid-area: footer;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -34,23 +34,49 @@ body {
|
|||||||
|
|
||||||
.Index-nodes {
|
.Index-nodes {
|
||||||
grid-area: main;
|
grid-area: main;
|
||||||
background: #2a2a2a;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
.Index-node {
|
.Index-node {
|
||||||
margin: 5px;
|
margin: 5px;
|
||||||
padding: 15px;
|
padding: 15px;
|
||||||
background: #1f1f1f;
|
background-color: #fff;
|
||||||
|
box-shadow: 0 0 5px rgba(0, 0, 0, 0.1);
|
||||||
|
}
|
||||||
|
|
||||||
|
span {
|
||||||
|
display: block;
|
||||||
|
margin-bottom: 5px;
|
||||||
|
}
|
||||||
|
|
||||||
|
input[type="text"] {
|
||||||
|
width: 100%;
|
||||||
|
padding: 10px;
|
||||||
|
border-radius: 5px;
|
||||||
|
border: 1px solid #ccc;
|
||||||
|
margin-bottom: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
button {
|
||||||
|
background-color: #4c9aff;
|
||||||
|
color: #fff;
|
||||||
|
border: none;
|
||||||
|
border-radius: 5px;
|
||||||
|
padding: 10px 20px;
|
||||||
|
font-size: 1.2em;
|
||||||
|
}
|
||||||
|
|
||||||
|
button:hover {
|
||||||
|
background-color: #4c7eff;
|
||||||
}
|
}
|
||||||
|
|
||||||
a:link {
|
a:link {
|
||||||
color: #50f020;
|
color: #333;
|
||||||
}
|
}
|
||||||
|
|
||||||
a:visited {
|
a:visited {
|
||||||
color: #50f020;
|
color: #333;
|
||||||
}
|
}
|
||||||
|
|
||||||
a:hover {
|
a:hover {
|
||||||
color: #30a00a;
|
color: #555;
|
||||||
}
|
}
|
||||||
|
@ -1166,7 +1166,7 @@ var actorConfirmChangeWorker = &cli.Command{
|
|||||||
var actorConfirmChangeBeneficiary = &cli.Command{
|
var actorConfirmChangeBeneficiary = &cli.Command{
|
||||||
Name: "confirm-change-beneficiary",
|
Name: "confirm-change-beneficiary",
|
||||||
Usage: "Confirm a beneficiary address change",
|
Usage: "Confirm a beneficiary address change",
|
||||||
ArgsUsage: "[minerAddress]",
|
ArgsUsage: "[minerID]",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
&cli.BoolFlag{
|
&cli.BoolFlag{
|
||||||
Name: "really-do-it",
|
Name: "really-do-it",
|
||||||
|
@ -2,9 +2,11 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"encoding/csv"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -318,6 +320,9 @@ var sectorsListCmd = &cli.Command{
|
|||||||
Value: parallelSectorChecks,
|
Value: parallelSectorChecks,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
Subcommands: []*cli.Command{
|
||||||
|
sectorsListUpgradeBoundsCmd,
|
||||||
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
// http mode allows for parallel json decoding/encoding, which was a bottleneck here
|
// http mode allows for parallel json decoding/encoding, which was a bottleneck here
|
||||||
minerApi, closer, err := lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp)
|
minerApi, closer, err := lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp)
|
||||||
@ -585,6 +590,169 @@ var sectorsListCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var sectorsListUpgradeBoundsCmd = &cli.Command{
|
||||||
|
Name: "upgrade-bounds",
|
||||||
|
Usage: "Output upgrade bounds for available sectors",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "buckets",
|
||||||
|
Value: 25,
|
||||||
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "csv",
|
||||||
|
Usage: "output machine-readable values",
|
||||||
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "deal-terms",
|
||||||
|
Usage: "bucket by how many deal-sectors can start at a given expiration",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
minerApi, closer, err := lcli.GetStorageMinerAPI(cctx, cliutil.StorageMinerUseHttp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer closer()
|
||||||
|
|
||||||
|
fullApi, closer2, err := lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer closer2()
|
||||||
|
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
list, err := minerApi.SectorsListInStates(ctx, []api.SectorState{
|
||||||
|
api.SectorState(sealing.Available),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting sector list: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
head, err := fullApi.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("getting chain head: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := bitfield.New()
|
||||||
|
|
||||||
|
for _, s := range list {
|
||||||
|
filter.Set(uint64(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
maddr, err := minerApi.ActorAddress(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sset, err := fullApi.StateMinerSectors(ctx, maddr, &filter, head.Key())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sset) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var minExpiration, maxExpiration abi.ChainEpoch
|
||||||
|
|
||||||
|
for _, s := range sset {
|
||||||
|
if s.Expiration < minExpiration || minExpiration == 0 {
|
||||||
|
minExpiration = s.Expiration
|
||||||
|
}
|
||||||
|
if s.Expiration > maxExpiration {
|
||||||
|
maxExpiration = s.Expiration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buckets := cctx.Int("buckets")
|
||||||
|
bucketSize := (maxExpiration - minExpiration) / abi.ChainEpoch(buckets)
|
||||||
|
bucketCounts := make([]int, buckets+1)
|
||||||
|
|
||||||
|
for b := range bucketCounts {
|
||||||
|
bucketMin := minExpiration + abi.ChainEpoch(b)*bucketSize
|
||||||
|
bucketMax := minExpiration + abi.ChainEpoch(b+1)*bucketSize
|
||||||
|
|
||||||
|
if cctx.Bool("deal-terms") {
|
||||||
|
bucketMax = bucketMax + policy.MarketDefaultAllocationTermBuffer
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range sset {
|
||||||
|
isInBucket := s.Expiration >= bucketMin && s.Expiration < bucketMax
|
||||||
|
|
||||||
|
if isInBucket {
|
||||||
|
bucketCounts[b]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creating CSV writer
|
||||||
|
writer := csv.NewWriter(os.Stdout)
|
||||||
|
|
||||||
|
// Writing CSV headers
|
||||||
|
err = writer.Write([]string{"Max Expiration in Bucket", "Sector Count"})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("writing csv headers: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writing bucket details
|
||||||
|
|
||||||
|
if cctx.Bool("csv") {
|
||||||
|
for i := 0; i < buckets; i++ {
|
||||||
|
maxExp := minExpiration + abi.ChainEpoch(i+1)*bucketSize
|
||||||
|
|
||||||
|
timeStr := strconv.FormatInt(int64(maxExp), 10)
|
||||||
|
|
||||||
|
err = writer.Write([]string{
|
||||||
|
timeStr,
|
||||||
|
strconv.Itoa(bucketCounts[i]),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("writing csv row: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush to make sure all data is written to the underlying writer
|
||||||
|
writer.Flush()
|
||||||
|
|
||||||
|
if err := writer.Error(); err != nil {
|
||||||
|
return xerrors.Errorf("flushing csv writer: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
tw := tablewriter.New(
|
||||||
|
tablewriter.Col("Bucket Expiration"),
|
||||||
|
tablewriter.Col("Sector Count"),
|
||||||
|
tablewriter.Col("Bar"),
|
||||||
|
)
|
||||||
|
|
||||||
|
var barCols = 40
|
||||||
|
var maxCount int
|
||||||
|
|
||||||
|
for _, c := range bucketCounts {
|
||||||
|
if c > maxCount {
|
||||||
|
maxCount = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < buckets; i++ {
|
||||||
|
maxExp := minExpiration + abi.ChainEpoch(i+1)*bucketSize
|
||||||
|
timeStr := cliutil.EpochTime(head.Height(), maxExp)
|
||||||
|
|
||||||
|
tw.Write(map[string]interface{}{
|
||||||
|
"Bucket Expiration": timeStr,
|
||||||
|
"Sector Count": color.YellowString("%d", bucketCounts[i]),
|
||||||
|
"Bar": "[" + color.GreenString(strings.Repeat("|", bucketCounts[i]*barCols/maxCount)) + strings.Repeat(" ", barCols-bucketCounts[i]*barCols/maxCount) + "]",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return tw.Flush(os.Stdout)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
var sectorsRefsCmd = &cli.Command{
|
var sectorsRefsCmd = &cli.Command{
|
||||||
Name: "refs",
|
Name: "refs",
|
||||||
Usage: "List References to sectors",
|
Usage: "List References to sectors",
|
||||||
@ -1922,10 +2090,31 @@ var sectorsBatchingPendingCommit = &cli.Command{
|
|||||||
for _, sector := range pending {
|
for _, sector := range pending {
|
||||||
fmt.Println(sector.Number)
|
fmt.Println(sector.Number)
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("No sectors queued to be committed")
|
reader := bufio.NewReader(os.Stdin)
|
||||||
|
fmt.Print("Do you want to publish these sectors now? (yes/no): ")
|
||||||
|
userInput, err := reader.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("reading user input: %w", err)
|
||||||
|
}
|
||||||
|
userInput = strings.ToLower(strings.TrimSpace(userInput))
|
||||||
|
|
||||||
|
if userInput == "yes" {
|
||||||
|
err := cctx.Set("publish-now", "true")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("setting publish-now flag: %w", err)
|
||||||
|
}
|
||||||
|
return cctx.Command.Action(cctx)
|
||||||
|
} else if userInput == "no" {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
fmt.Println("No sectors queued to be committed")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -1980,10 +2169,31 @@ var sectorsBatchingPendingPreCommit = &cli.Command{
|
|||||||
for _, sector := range pending {
|
for _, sector := range pending {
|
||||||
fmt.Println(sector.Number)
|
fmt.Println(sector.Number)
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("No sectors queued to be committed")
|
reader := bufio.NewReader(os.Stdin)
|
||||||
|
fmt.Print("Do you want to publish these sectors now? (yes/no): ")
|
||||||
|
userInput, err := reader.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("reading user input: %w", err)
|
||||||
|
}
|
||||||
|
userInput = strings.ToLower(strings.TrimSpace(userInput))
|
||||||
|
|
||||||
|
if userInput == "yes" {
|
||||||
|
err := cctx.Set("publish-now", "true")
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("setting publish-now flag: %w", err)
|
||||||
|
}
|
||||||
|
return cctx.Command.Action(cctx)
|
||||||
|
} else if userInput == "no" {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
fmt.Println("Invalid input. Please answer with 'yes' or 'no'.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
fmt.Println("No sectors queued to be committed")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -2014,7 +2224,6 @@ func yesno(b bool) string {
|
|||||||
return color.RedString("NO")
|
return color.RedString("NO")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO simulate this call if --really-do-it is not used
|
|
||||||
var sectorsCompactPartitionsCmd = &cli.Command{
|
var sectorsCompactPartitionsCmd = &cli.Command{
|
||||||
Name: "compact-partitions",
|
Name: "compact-partitions",
|
||||||
Usage: "removes dead sectors from partitions and reduces the number of partitions used if possible",
|
Usage: "removes dead sectors from partitions and reduces the number of partitions used if possible",
|
||||||
@ -2040,12 +2249,7 @@ var sectorsCompactPartitionsCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
if !cctx.Bool("really-do-it") {
|
fullNodeAPI, acloser, err := lcli.GetFullNodeAPI(cctx)
|
||||||
fmt.Println("Pass --really-do-it to actually execute this action")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
api, acloser, err := lcli.GetFullNodeAPI(cctx)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -2058,7 +2262,7 @@ var sectorsCompactPartitionsCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
minfo, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
minfo, err := fullNodeAPI.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -2074,46 +2278,118 @@ var sectorsCompactPartitionsCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
fmt.Printf("compacting %d paritions\n", len(parts))
|
fmt.Printf("compacting %d paritions\n", len(parts))
|
||||||
|
|
||||||
|
var makeMsgForPartitions func(partitionsBf bitfield.BitField) ([]*types.Message, error)
|
||||||
|
makeMsgForPartitions = func(partitionsBf bitfield.BitField) ([]*types.Message, error) {
|
||||||
|
params := miner.CompactPartitionsParams{
|
||||||
|
Deadline: deadline,
|
||||||
|
Partitions: partitionsBf,
|
||||||
|
}
|
||||||
|
|
||||||
|
sp, aerr := actors.SerializeParams(¶ms)
|
||||||
|
if aerr != nil {
|
||||||
|
return nil, xerrors.Errorf("serializing params: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := &types.Message{
|
||||||
|
From: minfo.Worker,
|
||||||
|
To: maddr,
|
||||||
|
Method: builtin.MethodsMiner.CompactPartitions,
|
||||||
|
Value: big.Zero(),
|
||||||
|
Params: sp,
|
||||||
|
}
|
||||||
|
|
||||||
|
estimatedMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK)
|
||||||
|
if err != nil && xerrors.Is(err, &api.ErrOutOfGas{}) {
|
||||||
|
// the message is too big -- split into 2
|
||||||
|
partitionsSlice, err := partitionsBf.All(math.MaxUint64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
partitions1 := bitfield.New()
|
||||||
|
for i := 0; i < len(partitionsSlice)/2; i++ {
|
||||||
|
partitions1.Set(uint64(i))
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs1, err := makeMsgForPartitions(partitions1)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// time for the second half
|
||||||
|
partitions2 := bitfield.New()
|
||||||
|
for i := len(partitionsSlice) / 2; i < len(partitionsSlice); i++ {
|
||||||
|
partitions2.Set(uint64(i))
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs2, err := makeMsgForPartitions(partitions2)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return append(msgs1, msgs2...), nil
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return []*types.Message{estimatedMsg}, nil
|
||||||
|
}
|
||||||
|
|
||||||
partitions := bitfield.New()
|
partitions := bitfield.New()
|
||||||
for _, partition := range parts {
|
for _, partition := range parts {
|
||||||
partitions.Set(uint64(partition))
|
partitions.Set(uint64(partition))
|
||||||
}
|
}
|
||||||
|
|
||||||
params := miner.CompactPartitionsParams{
|
msgs, err := makeMsgForPartitions(partitions)
|
||||||
Deadline: deadline,
|
|
||||||
Partitions: partitions,
|
|
||||||
}
|
|
||||||
|
|
||||||
sp, err := actors.SerializeParams(¶ms)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("serializing params: %w", err)
|
return xerrors.Errorf("failed to make messages: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
|
// Actually send the messages if really-do-it provided, simulate otherwise
|
||||||
From: minfo.Worker,
|
if cctx.Bool("really-do-it") {
|
||||||
To: maddr,
|
smsgs, err := fullNodeAPI.MpoolBatchPushMessage(ctx, msgs, nil)
|
||||||
Method: builtin.MethodsMiner.CompactPartitions,
|
if err != nil {
|
||||||
Value: big.Zero(),
|
return xerrors.Errorf("mpool push: %w", err)
|
||||||
Params: sp,
|
}
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
if len(smsgs) == 1 {
|
||||||
return xerrors.Errorf("mpool push: %w", err)
|
fmt.Printf("Requested compact partitions in message %s\n", smsgs[0].Cid())
|
||||||
|
} else {
|
||||||
|
fmt.Printf("Requested compact partitions in %d messages\n\n", len(smsgs))
|
||||||
|
for _, v := range smsgs {
|
||||||
|
fmt.Println(v.Cid())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range smsgs {
|
||||||
|
wait, err := fullNodeAPI.StateWaitMsg(ctx, v.Cid(), 2)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// check it executed successfully
|
||||||
|
if wait.Receipt.ExitCode.IsError() {
|
||||||
|
fmt.Println(cctx.App.Writer, "compact partitions msg %s failed!", v.Cid())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Requested compact partitions in message %s\n", smsg.Cid())
|
for i, v := range msgs {
|
||||||
|
fmt.Printf("total of %d CompactPartitions msgs would be sent\n", len(msgs))
|
||||||
|
|
||||||
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), 0)
|
estMsg, err := fullNodeAPI.GasEstimateMessageGas(ctx, v, nil, types.EmptyTSK)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check it executed successfully
|
fmt.Printf("msg %d would cost up to %s\n", i+1, types.FIL(estMsg.RequiredFunds()))
|
||||||
if wait.Receipt.ExitCode.IsError() {
|
|
||||||
fmt.Println(cctx.App.Writer, "compact partitions failed!")
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -225,6 +225,7 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
&cli.BoolFlag{
|
&cli.BoolFlag{
|
||||||
Name: "drop-missing",
|
Name: "drop-missing",
|
||||||
Usage: "Drop index entries with missing files",
|
Usage: "Drop index entries with missing files",
|
||||||
|
Value: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
@ -235,14 +236,19 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
defer closer()
|
defer closer()
|
||||||
ctx := lcli.ReqContext(cctx)
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
if cctx.NArg() != 1 {
|
// check if no argument and no --id or --all flag is provided
|
||||||
return lcli.IncorrectNumArgs(cctx)
|
if cctx.NArg() == 0 && !cctx.IsSet("id") && !cctx.Bool("all") {
|
||||||
|
return xerrors.Errorf("You must specify a storage path, or --id, or --all")
|
||||||
}
|
}
|
||||||
|
|
||||||
if cctx.IsSet("id") && cctx.Bool("all") {
|
if cctx.IsSet("id") && cctx.Bool("all") {
|
||||||
return xerrors.Errorf("--id and --all can't be passed at the same time")
|
return xerrors.Errorf("--id and --all can't be passed at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cctx.Bool("all") && cctx.NArg() > 0 {
|
||||||
|
return xerrors.Errorf("No additional arguments are expected when --all is set")
|
||||||
|
}
|
||||||
|
|
||||||
if cctx.IsSet("id") {
|
if cctx.IsSet("id") {
|
||||||
id := storiface.ID(cctx.String("id"))
|
id := storiface.ID(cctx.String("id"))
|
||||||
return minerApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
return minerApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
||||||
@ -252,7 +258,28 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
return minerApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
|
return minerApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return xerrors.Errorf("either --all or --id must be specified")
|
// As no --id or --all flag is set, we can assume the argument is a path.
|
||||||
|
path := cctx.Args().First()
|
||||||
|
metaFilePath := filepath.Join(path, "sectorstore.json")
|
||||||
|
|
||||||
|
var meta storiface.LocalStorageMeta
|
||||||
|
metaFile, err := os.Open(metaFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("Failed to open file: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if closeErr := metaFile.Close(); closeErr != nil {
|
||||||
|
log.Error("Failed to close the file: %v", closeErr)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = json.NewDecoder(metaFile).Decode(&meta)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("Failed to decode file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := meta.ID
|
||||||
|
return minerApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -968,7 +968,7 @@ var actorProposeChangeBeneficiary = &cli.Command{
|
|||||||
var actorConfirmChangeBeneficiary = &cli.Command{
|
var actorConfirmChangeBeneficiary = &cli.Command{
|
||||||
Name: "confirm-change-beneficiary",
|
Name: "confirm-change-beneficiary",
|
||||||
Usage: "Confirm a beneficiary address change",
|
Usage: "Confirm a beneficiary address change",
|
||||||
ArgsUsage: "[minerAddress]",
|
ArgsUsage: "[minerID]",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
&cli.BoolFlag{
|
&cli.BoolFlag{
|
||||||
Name: "really-do-it",
|
Name: "really-do-it",
|
||||||
|
@ -9,6 +9,7 @@ import (
|
|||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -23,6 +24,7 @@ var ethCmd = &cli.Command{
|
|||||||
},
|
},
|
||||||
Subcommands: []*cli.Command{
|
Subcommands: []*cli.Command{
|
||||||
checkTipsetsCmd,
|
checkTipsetsCmd,
|
||||||
|
computeEthHashCmd,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -70,3 +72,36 @@ var checkTipsetsCmd = &cli.Command{
|
|||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var computeEthHashCmd = &cli.Command{
|
||||||
|
Name: "compute-eth-hash",
|
||||||
|
Usage: "Compute the eth hash for a given message CID",
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
if cctx.NArg() != 1 {
|
||||||
|
return lcli.IncorrectNumArgs(cctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, err := messageFromString(cctx, cctx.Args().First())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch msg := msg.(type) {
|
||||||
|
case *types.SignedMessage:
|
||||||
|
tx, err := ethtypes.EthTxFromSignedEthMessage(msg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to convert from signed message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.Hash, err = tx.TxHash()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to call TxHash: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Println(tx.Hash)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("not a signed message")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
@ -5,11 +5,11 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/ipfs/go-blockservice"
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
format "github.com/ipfs/go-ipld-format"
|
format "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
@ -15,12 +15,12 @@ import (
|
|||||||
"github.com/dgraph-io/badger/v2"
|
"github.com/dgraph-io/badger/v2"
|
||||||
"github.com/dgraph-io/badger/v2/pb"
|
"github.com/dgraph-io/badger/v2/pb"
|
||||||
"github.com/dustin/go-humanize"
|
"github.com/dustin/go-humanize"
|
||||||
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
block "github.com/ipfs/go-block-format"
|
block "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-blockservice"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
ipld "github.com/ipfs/go-ipld-format"
|
ipld "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
"github.com/multiformats/go-base32"
|
"github.com/multiformats/go-base32"
|
||||||
mh "github.com/multiformats/go-multihash"
|
mh "github.com/multiformats/go-multihash"
|
||||||
|
337
cmd/lotus-shed/indexes.go
Normal file
337
cmd/lotus-shed/indexes.go
Normal file
@ -0,0 +1,337 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/mitchellh/go-homedir"
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||||
|
lcli "github.com/filecoin-project/lotus/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
func withCategory(cat string, cmd *cli.Command) *cli.Command {
|
||||||
|
cmd.Category = strings.ToUpper(cat)
|
||||||
|
return cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
var indexesCmd = &cli.Command{
|
||||||
|
Name: "indexes",
|
||||||
|
Usage: "Commands related to managing sqlite indexes",
|
||||||
|
HideHelpCommand: true,
|
||||||
|
Subcommands: []*cli.Command{
|
||||||
|
withCategory("msgindex", backfillMsgIndexCmd),
|
||||||
|
withCategory("msgindex", pruneMsgIndexCmd),
|
||||||
|
withCategory("txhash", backfillTxHashCmd),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var backfillMsgIndexCmd = &cli.Command{
|
||||||
|
Name: "backfill-msgindex",
|
||||||
|
Usage: "Backfill the msgindex.db for a number of epochs starting from a specified height",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "from",
|
||||||
|
Value: 0,
|
||||||
|
Usage: "height to start the backfill; uses the current head if omitted",
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "epochs",
|
||||||
|
Value: 1800,
|
||||||
|
Usage: "number of epochs to backfill; defaults to 1800 (2 finalities)",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer closer()
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
curTs, err := api.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
startHeight := int64(cctx.Int("from"))
|
||||||
|
if startHeight == 0 {
|
||||||
|
startHeight = int64(curTs.Height()) - 1
|
||||||
|
}
|
||||||
|
epochs := cctx.Int("epochs")
|
||||||
|
|
||||||
|
basePath, err := homedir.Expand(cctx.String("repo"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
|
||||||
|
db, err := sql.Open("sqlite3", dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := db.Close()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("ERROR: closing db: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
insertStmt, err := db.Prepare("INSERT OR IGNORE INTO messages (cid, tipset_cid, epoch) VALUES (?, ?, ?)")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var nrRowsAffected int64
|
||||||
|
for i := 0; i < epochs; i++ {
|
||||||
|
epoch := abi.ChainEpoch(startHeight - int64(i))
|
||||||
|
|
||||||
|
if i%100 == 0 {
|
||||||
|
log.Infof("%d/%d processing epoch:%d, nrRowsAffected:%d", i, epochs, epoch, nrRowsAffected)
|
||||||
|
}
|
||||||
|
|
||||||
|
ts, err := api.ChainGetTipSetByHeight(ctx, epoch, curTs.Key())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get tipset at epoch %d: %w", epoch, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tsCid, err := ts.Key().Cid()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get tipset cid at epoch %d: %w", epoch, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs, err := api.ChainGetMessagesInTipset(ctx, ts.Key())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get messages in tipset at epoch %d: %w", epoch, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msg := range msgs {
|
||||||
|
key := msg.Cid.String()
|
||||||
|
tskey := tsCid.String()
|
||||||
|
res, err := insertStmt.Exec(key, tskey, int64(epoch))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to insert message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err)
|
||||||
|
}
|
||||||
|
rowsAffected, err := res.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get rows affected for message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err)
|
||||||
|
}
|
||||||
|
nrRowsAffected += rowsAffected
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Done backfilling, nrRowsAffected:%d", nrRowsAffected)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var pruneMsgIndexCmd = &cli.Command{
|
||||||
|
Name: "prune-msgindex",
|
||||||
|
Usage: "Prune the msgindex.db for messages included before a given epoch",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "from",
|
||||||
|
Usage: "height to start the prune; if negative it indicates epochs from current head",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer closer()
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
startHeight := int64(cctx.Int("from"))
|
||||||
|
if startHeight < 0 {
|
||||||
|
curTs, err := api.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
startHeight += int64(curTs.Height())
|
||||||
|
|
||||||
|
if startHeight < 0 {
|
||||||
|
return xerrors.Errorf("bogus start height %d", startHeight)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
basePath, err := homedir.Expand(cctx.String("repo"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
|
||||||
|
db, err := sql.Open("sqlite3", dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := db.Close()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("ERROR: closing db: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := tx.Exec("DELETE FROM messages WHERE epoch < ?", startHeight); err != nil {
|
||||||
|
if err := tx.Rollback(); err != nil {
|
||||||
|
fmt.Printf("ERROR: rollback: %s", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var backfillTxHashCmd = &cli.Command{
|
||||||
|
Name: "backfill-txhash",
|
||||||
|
Usage: "Backfills the txhash.db for a number of epochs starting from a specified height",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.UintFlag{
|
||||||
|
Name: "from",
|
||||||
|
Value: 0,
|
||||||
|
Usage: "the tipset height to start backfilling from (0 is head of chain)",
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "epochs",
|
||||||
|
Value: 2000,
|
||||||
|
Usage: "the number of epochs to backfill",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer closer()
|
||||||
|
|
||||||
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
curTs, err := api.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
startHeight := int64(cctx.Int("from"))
|
||||||
|
if startHeight == 0 {
|
||||||
|
startHeight = int64(curTs.Height()) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
epochs := cctx.Int("epochs")
|
||||||
|
|
||||||
|
basePath, err := homedir.Expand(cctx.String("repo"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dbPath := filepath.Join(basePath, "sqlite", "txhash.db")
|
||||||
|
db, err := sql.Open("sqlite3", dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := db.Close()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("ERROR: closing db: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
insertStmt, err := db.Prepare("INSERT OR IGNORE INTO eth_tx_hashes(hash, cid) VALUES(?, ?)")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var totalRowsAffected int64 = 0
|
||||||
|
for i := 0; i < epochs; i++ {
|
||||||
|
epoch := abi.ChainEpoch(startHeight - int64(i))
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-cctx.Done():
|
||||||
|
fmt.Println("request cancelled")
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
curTsk := curTs.Parents()
|
||||||
|
execTs, err := api.ChainGetTipSet(ctx, curTsk)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to call ChainGetTipSet for %s: %w", curTsk, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if i%100 == 0 {
|
||||||
|
log.Infof("%d/%d processing epoch:%d", i, epochs, epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, blockheader := range execTs.Blocks() {
|
||||||
|
blkMsgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid())
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("Could not get block messages at epoch: %d, stopping walking up the chain", epoch)
|
||||||
|
epochs = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, smsg := range blkMsgs.SecpkMessages {
|
||||||
|
if smsg.Signature.Type != crypto.SigTypeDelegated {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
tx, err := ethtypes.EthTxFromSignedEthMessage(smsg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to convert from signed message: %w at epoch: %d", err, epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx.Hash, err = tx.TxHash()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to calculate hash for ethTx: %w at epoch: %d", err, epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := insertStmt.Exec(tx.Hash.String(), smsg.Cid().String())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error inserting tx mapping to db: %s at epoch: %d", err, epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
rowsAffected, err := res.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error getting rows affected: %s at epoch: %d", err, epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rowsAffected > 0 {
|
||||||
|
log.Debugf("Inserted txhash %s, cid: %s at epoch: %d", tx.Hash.String(), smsg.Cid().String(), epoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
totalRowsAffected += rowsAffected
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
curTs = execTs
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("Done, inserted %d missing txhashes", totalRowsAffected)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
}
|
@ -14,6 +14,8 @@ import (
|
|||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||||
"github.com/filecoin-project/go-state-types/builtin"
|
"github.com/filecoin-project/go-state-types/builtin"
|
||||||
|
v10 "github.com/filecoin-project/go-state-types/builtin/v10"
|
||||||
|
v11 "github.com/filecoin-project/go-state-types/builtin/v11"
|
||||||
v8 "github.com/filecoin-project/go-state-types/builtin/v8"
|
v8 "github.com/filecoin-project/go-state-types/builtin/v8"
|
||||||
v9 "github.com/filecoin-project/go-state-types/builtin/v9"
|
v9 "github.com/filecoin-project/go-state-types/builtin/v9"
|
||||||
|
|
||||||
@ -137,6 +139,16 @@ var invariantsCmd = &cli.Command{
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("checking state invariants: %w", err)
|
return xerrors.Errorf("checking state invariants: %w", err)
|
||||||
}
|
}
|
||||||
|
case actorstypes.Version10:
|
||||||
|
messages, err = v10.CheckStateInvariants(actorTree, abi.ChainEpoch(epoch), actorCodeCids)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("checking state invariants: %w", err)
|
||||||
|
}
|
||||||
|
case actorstypes.Version11:
|
||||||
|
messages, err = v11.CheckStateInvariants(actorTree, abi.ChainEpoch(epoch), actorCodeCids)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("checking state invariants: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("completed, took ", time.Since(startTime))
|
fmt.Println("completed, took ", time.Since(startTime))
|
||||||
|
@ -1,8 +1,10 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
|
||||||
logging "github.com/ipfs/go-log/v2"
|
logging "github.com/ipfs/go-log/v2"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
@ -84,7 +86,7 @@ func main() {
|
|||||||
invariantsCmd,
|
invariantsCmd,
|
||||||
gasTraceCmd,
|
gasTraceCmd,
|
||||||
replayOfflineCmd,
|
replayOfflineCmd,
|
||||||
msgindexCmd,
|
indexesCmd,
|
||||||
FevmAnalyticsCmd,
|
FevmAnalyticsCmd,
|
||||||
mismatchesCmd,
|
mismatchesCmd,
|
||||||
}
|
}
|
||||||
@ -118,7 +120,20 @@ func main() {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := app.Run(os.Args); err != nil {
|
// terminate early on ctrl+c
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt)
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
go func() {
|
||||||
|
<-c
|
||||||
|
cancel()
|
||||||
|
fmt.Println("Received interrupt, shutting down... Press CTRL+C again to force shutdown")
|
||||||
|
<-c
|
||||||
|
fmt.Println("Forcing stop")
|
||||||
|
os.Exit(1)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := app.RunContext(ctx, os.Args); err != nil {
|
||||||
log.Errorf("%+v", err)
|
log.Errorf("%+v", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
return
|
return
|
||||||
|
@ -1,221 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
|
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
_ "github.com/mattn/go-sqlite3"
|
|
||||||
"github.com/mitchellh/go-homedir"
|
|
||||||
"github.com/urfave/cli/v2"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
|
||||||
|
|
||||||
lcli "github.com/filecoin-project/lotus/cli"
|
|
||||||
)
|
|
||||||
|
|
||||||
var msgindexCmd = &cli.Command{
|
|
||||||
Name: "msgindex",
|
|
||||||
Usage: "Tools for managing the message index",
|
|
||||||
Subcommands: []*cli.Command{
|
|
||||||
msgindexBackfillCmd,
|
|
||||||
msgindexPruneCmd,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var msgindexBackfillCmd = &cli.Command{
|
|
||||||
Name: "backfill",
|
|
||||||
Usage: "Backfill the message index for a number of epochs starting from a specified height",
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
&cli.IntFlag{
|
|
||||||
Name: "from",
|
|
||||||
Value: 0,
|
|
||||||
Usage: "height to start the backfill; uses the current head if omitted",
|
|
||||||
},
|
|
||||||
&cli.IntFlag{
|
|
||||||
Name: "epochs",
|
|
||||||
Value: 1800,
|
|
||||||
Usage: "number of epochs to backfill; defaults to 1800 (2 finalities)",
|
|
||||||
},
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "repo",
|
|
||||||
Value: "~/.lotus",
|
|
||||||
Usage: "path to the repo",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Action: func(cctx *cli.Context) error {
|
|
||||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer closer()
|
|
||||||
ctx := lcli.ReqContext(cctx)
|
|
||||||
|
|
||||||
curTs, err := api.ChainHead(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
startHeight := int64(cctx.Int("from"))
|
|
||||||
if startHeight == 0 {
|
|
||||||
startHeight = int64(curTs.Height()) - 1
|
|
||||||
}
|
|
||||||
epochs := cctx.Int("epochs")
|
|
||||||
|
|
||||||
basePath, err := homedir.Expand(cctx.String("repo"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
|
|
||||||
db, err := sql.Open("sqlite3", dbPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
err := db.Close()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("ERROR: closing db: %s", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
tx, err := db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
insertStmt, err := tx.Prepare("INSERT INTO messages VALUES (?, ?, ?)")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
insertMsg := func(cid, tsCid cid.Cid, epoch abi.ChainEpoch) error {
|
|
||||||
key := cid.String()
|
|
||||||
tskey := tsCid.String()
|
|
||||||
if _, err := insertStmt.Exec(key, tskey, int64(epoch)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
rollback := func() {
|
|
||||||
if err := tx.Rollback(); err != nil {
|
|
||||||
fmt.Printf("ERROR: rollback: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < epochs; i++ {
|
|
||||||
epoch := abi.ChainEpoch(startHeight - int64(i))
|
|
||||||
|
|
||||||
ts, err := api.ChainGetTipSetByHeight(ctx, epoch, curTs.Key())
|
|
||||||
if err != nil {
|
|
||||||
rollback()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tsCid, err := ts.Key().Cid()
|
|
||||||
if err != nil {
|
|
||||||
rollback()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
msgs, err := api.ChainGetMessagesInTipset(ctx, ts.Key())
|
|
||||||
if err != nil {
|
|
||||||
rollback()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, msg := range msgs {
|
|
||||||
if err := insertMsg(msg.Cid, tsCid, epoch); err != nil {
|
|
||||||
rollback()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var msgindexPruneCmd = &cli.Command{
|
|
||||||
Name: "prune",
|
|
||||||
Usage: "Prune the message index for messages included before a given epoch",
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
&cli.IntFlag{
|
|
||||||
Name: "from",
|
|
||||||
Usage: "height to start the prune; if negative it indicates epochs from current head",
|
|
||||||
},
|
|
||||||
&cli.StringFlag{
|
|
||||||
Name: "repo",
|
|
||||||
Value: "~/.lotus",
|
|
||||||
Usage: "path to the repo",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Action: func(cctx *cli.Context) error {
|
|
||||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer closer()
|
|
||||||
ctx := lcli.ReqContext(cctx)
|
|
||||||
|
|
||||||
startHeight := int64(cctx.Int("from"))
|
|
||||||
if startHeight < 0 {
|
|
||||||
curTs, err := api.ChainHead(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
startHeight += int64(curTs.Height())
|
|
||||||
|
|
||||||
if startHeight < 0 {
|
|
||||||
return xerrors.Errorf("bogus start height %d", startHeight)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
basePath, err := homedir.Expand(cctx.String("repo"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
|
|
||||||
db, err := sql.Open("sqlite3", dbPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
err := db.Close()
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("ERROR: closing db: %s", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
tx, err := db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := tx.Exec("DELETE FROM messages WHERE epoch < ?", startHeight); err != nil {
|
|
||||||
if err := tx.Rollback(); err != nil {
|
|
||||||
fmt.Printf("ERROR: rollback: %s", err)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
@ -653,7 +653,7 @@ fr32 padding is removed from the output.`,
|
|||||||
return xerrors.Errorf("getting reader: %w", err)
|
return xerrors.Errorf("getting reader: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
rd, err := readStarter(0)
|
rd, err := readStarter(0, storiface.PaddedByteIndex(length))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("starting reader: %w", err)
|
return xerrors.Errorf("starting reader: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -10,11 +10,11 @@ import (
|
|||||||
|
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
lru "github.com/hashicorp/golang-lru/v2"
|
lru "github.com/hashicorp/golang-lru/v2"
|
||||||
"github.com/ipfs/go-blockservice"
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
format "github.com/ipfs/go-ipld-format"
|
format "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
@ -474,7 +474,7 @@ var verifRegRemoveVerifiedClientDataCapCmd = &cli.Command{
|
|||||||
|
|
||||||
st, err := multisig.Load(store, vrkState)
|
st, err := multisig.Load(store, vrkState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("load vrk failed: %w ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
signers, err := st.Signers()
|
signers, err := st.Signers()
|
||||||
@ -508,14 +508,13 @@ var verifRegRemoveVerifiedClientDataCapCmd = &cli.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sm, _, err := srv.PublishMessage(ctx, proto, false)
|
sm, err := lcli.InteractiveSend(ctx, cctx, srv, proto)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
msgCid := sm.Cid()
|
msgCid := sm.Cid()
|
||||||
|
fmt.Println("sending msg: ", msgCid)
|
||||||
fmt.Printf("message sent, now waiting on cid: %s\n", msgCid)
|
|
||||||
|
|
||||||
mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
|
mwait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -275,6 +275,12 @@ var runCmd = &cli.Command{
|
|||||||
Name: "http-server-timeout",
|
Name: "http-server-timeout",
|
||||||
Value: "30s",
|
Value: "30s",
|
||||||
},
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "data-cid",
|
||||||
|
Usage: "Run the data-cid task. true|false",
|
||||||
|
Value: true,
|
||||||
|
DefaultText: "inherits --addpiece",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Before: func(cctx *cli.Context) error {
|
Before: func(cctx *cli.Context) error {
|
||||||
if cctx.IsSet("address") {
|
if cctx.IsSet("address") {
|
||||||
@ -386,8 +392,19 @@ var runCmd = &cli.Command{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ttDataCidDefault := false
|
||||||
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("addpiece")) && cctx.Bool("addpiece") {
|
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("addpiece")) && cctx.Bool("addpiece") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTAddPiece, sealtasks.TTDataCid)
|
taskTypes = append(taskTypes, sealtasks.TTAddPiece)
|
||||||
|
ttDataCidDefault = true
|
||||||
|
}
|
||||||
|
if workerType == sealtasks.WorkerSealing {
|
||||||
|
if cctx.IsSet("data-cid") {
|
||||||
|
if cctx.Bool("data-cid") {
|
||||||
|
taskTypes = append(taskTypes, sealtasks.TTDataCid)
|
||||||
|
}
|
||||||
|
} else if ttDataCidDefault {
|
||||||
|
taskTypes = append(taskTypes, sealtasks.TTDataCid)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("sector-download")) && cctx.Bool("sector-download") {
|
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("sector-download")) && cctx.Bool("sector-download") {
|
||||||
taskTypes = append(taskTypes, sealtasks.TTDownloadSector)
|
taskTypes = append(taskTypes, sealtasks.TTDownloadSector)
|
||||||
|
@ -178,6 +178,7 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
&cli.BoolFlag{
|
&cli.BoolFlag{
|
||||||
Name: "drop-missing",
|
Name: "drop-missing",
|
||||||
Usage: "Drop index entries with missing files",
|
Usage: "Drop index entries with missing files",
|
||||||
|
Value: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Action: func(cctx *cli.Context) error {
|
Action: func(cctx *cli.Context) error {
|
||||||
@ -188,10 +189,19 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
defer closer()
|
defer closer()
|
||||||
ctx := lcli.ReqContext(cctx)
|
ctx := lcli.ReqContext(cctx)
|
||||||
|
|
||||||
|
// check if no argument and no --id or --all flag is provided
|
||||||
|
if cctx.NArg() == 0 && !cctx.IsSet("id") && !cctx.Bool("all") {
|
||||||
|
return xerrors.Errorf("You must specify a storage path, or --id, or --all")
|
||||||
|
}
|
||||||
|
|
||||||
if cctx.IsSet("id") && cctx.Bool("all") {
|
if cctx.IsSet("id") && cctx.Bool("all") {
|
||||||
return xerrors.Errorf("--id and --all can't be passed at the same time")
|
return xerrors.Errorf("--id and --all can't be passed at the same time")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cctx.Bool("all") && cctx.NArg() > 0 {
|
||||||
|
return xerrors.Errorf("No additional arguments are expected when --all is set")
|
||||||
|
}
|
||||||
|
|
||||||
if cctx.IsSet("id") {
|
if cctx.IsSet("id") {
|
||||||
id := storiface.ID(cctx.String("id"))
|
id := storiface.ID(cctx.String("id"))
|
||||||
return nodeApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
return nodeApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
||||||
@ -201,6 +211,27 @@ var storageRedeclareCmd = &cli.Command{
|
|||||||
return nodeApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
|
return nodeApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return xerrors.Errorf("either --all or --id must be specified")
|
// As no --id or --all flag is set, we can assume the argument is a path.
|
||||||
|
path := cctx.Args().First()
|
||||||
|
metaFilePath := filepath.Join(path, "sectorstore.json")
|
||||||
|
|
||||||
|
var meta storiface.LocalStorageMeta
|
||||||
|
metaFile, err := os.Open(metaFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("Failed to open file: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if closeErr := metaFile.Close(); closeErr != nil {
|
||||||
|
log.Error("Failed to close the file: %v", closeErr)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err = json.NewDecoder(metaFile).Decode(&meta)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("Failed to decode file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
id := meta.ID
|
||||||
|
return nodeApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -6,16 +6,16 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
exchange "github.com/ipfs/boxo/exchange"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-blockservice"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
ds "github.com/ipfs/go-datastore"
|
ds "github.com/ipfs/go-datastore"
|
||||||
dssync "github.com/ipfs/go-datastore/sync"
|
dssync "github.com/ipfs/go-datastore/sync"
|
||||||
exchange "github.com/ipfs/go-ipfs-exchange-interface"
|
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
cbor "github.com/ipfs/go-ipld-cbor"
|
cbor "github.com/ipfs/go-ipld-cbor"
|
||||||
format "github.com/ipfs/go-ipld-format"
|
format "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api/v0api"
|
"github.com/filecoin-project/lotus/api/v0api"
|
||||||
|
@ -13,13 +13,13 @@ import (
|
|||||||
|
|
||||||
"github.com/fatih/color"
|
"github.com/fatih/color"
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
|
"github.com/ipfs/boxo/blockservice"
|
||||||
|
offline "github.com/ipfs/boxo/exchange/offline"
|
||||||
|
"github.com/ipfs/boxo/ipld/merkledag"
|
||||||
blocks "github.com/ipfs/go-block-format"
|
blocks "github.com/ipfs/go-block-format"
|
||||||
"github.com/ipfs/go-blockservice"
|
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
ds "github.com/ipfs/go-datastore"
|
ds "github.com/ipfs/go-datastore"
|
||||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
|
||||||
format "github.com/ipfs/go-ipld-format"
|
format "github.com/ipfs/go-ipld-format"
|
||||||
"github.com/ipfs/go-merkledag"
|
|
||||||
"github.com/ipld/go-car"
|
"github.com/ipld/go-car"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
@ -4202,7 +4202,7 @@ Inputs:
|
|||||||
Response:
|
Response:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"Channel": "\u003cempty\u003e",
|
"Channel": "f01234",
|
||||||
"From": "f01234",
|
"From": "f01234",
|
||||||
"To": "f01234",
|
"To": "f01234",
|
||||||
"ConfirmedAmt": "0",
|
"ConfirmedAmt": "0",
|
||||||
@ -4233,7 +4233,7 @@ Inputs:
|
|||||||
Response:
|
Response:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"Channel": "\u003cempty\u003e",
|
"Channel": "f01234",
|
||||||
"From": "f01234",
|
"From": "f01234",
|
||||||
"To": "f01234",
|
"To": "f01234",
|
||||||
"ConfirmedAmt": "0",
|
"ConfirmedAmt": "0",
|
||||||
@ -4953,7 +4953,7 @@ Response:
|
|||||||
},
|
},
|
||||||
"Nonce": 42,
|
"Nonce": 42,
|
||||||
"Balance": "0",
|
"Balance": "0",
|
||||||
"Address": "\u003cempty\u003e"
|
"Address": "f01234"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@ -5242,7 +5242,7 @@ Response:
|
|||||||
},
|
},
|
||||||
"Nonce": 42,
|
"Nonce": 42,
|
||||||
"Balance": "0",
|
"Balance": "0",
|
||||||
"Address": "\u003cempty\u003e"
|
"Address": "f01234"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -5430,7 +5430,6 @@ Response:
|
|||||||
"UpgradeRefuelHeight": 10101,
|
"UpgradeRefuelHeight": 10101,
|
||||||
"UpgradeTapeHeight": 10101,
|
"UpgradeTapeHeight": 10101,
|
||||||
"UpgradeKumquatHeight": 10101,
|
"UpgradeKumquatHeight": 10101,
|
||||||
"UpgradePriceListOopsHeight": 10101,
|
|
||||||
"BreezeGasTampingDuration": 10101,
|
"BreezeGasTampingDuration": 10101,
|
||||||
"UpgradeCalicoHeight": 10101,
|
"UpgradeCalicoHeight": 10101,
|
||||||
"UpgradePersianHeight": 10101,
|
"UpgradePersianHeight": 10101,
|
||||||
@ -5986,6 +5985,7 @@ Response:
|
|||||||
"SectorSize": 34359738368,
|
"SectorSize": 34359738368,
|
||||||
"WindowPoStPartitionSectors": 42,
|
"WindowPoStPartitionSectors": 42,
|
||||||
"ConsensusFaultElapsed": 10101,
|
"ConsensusFaultElapsed": 10101,
|
||||||
|
"PendingOwnerAddress": "f01234",
|
||||||
"Beneficiary": "f01234",
|
"Beneficiary": "f01234",
|
||||||
"BeneficiaryTerm": {
|
"BeneficiaryTerm": {
|
||||||
"Quota": "0",
|
"Quota": "0",
|
||||||
|
@ -103,6 +103,7 @@
|
|||||||
* [EthProtocolVersion](#EthProtocolVersion)
|
* [EthProtocolVersion](#EthProtocolVersion)
|
||||||
* [EthSendRawTransaction](#EthSendRawTransaction)
|
* [EthSendRawTransaction](#EthSendRawTransaction)
|
||||||
* [EthSubscribe](#EthSubscribe)
|
* [EthSubscribe](#EthSubscribe)
|
||||||
|
* [EthSyncing](#EthSyncing)
|
||||||
* [EthUninstallFilter](#EthUninstallFilter)
|
* [EthUninstallFilter](#EthUninstallFilter)
|
||||||
* [EthUnsubscribe](#EthUnsubscribe)
|
* [EthUnsubscribe](#EthUnsubscribe)
|
||||||
* [Filecoin](#Filecoin)
|
* [Filecoin](#Filecoin)
|
||||||
@ -2598,7 +2599,7 @@ Polling method for a filter, returns event logs which occurred since last poll.
|
|||||||
(requires write perm since timestamp of last filter execution will be written)
|
(requires write perm since timestamp of last filter execution will be written)
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -2619,7 +2620,7 @@ Returns event logs matching filter with given id.
|
|||||||
(requires write perm since timestamp of last filter execution will be written)
|
(requires write perm since timestamp of last filter execution will be written)
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -2990,7 +2991,7 @@ Response: `"0x0"`
|
|||||||
Installs a persistent filter to notify when a new block arrives.
|
Installs a persistent filter to notify when a new block arrives.
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs: `null`
|
Inputs: `null`
|
||||||
|
|
||||||
@ -3000,7 +3001,7 @@ Response: `"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"`
|
|||||||
Installs a persistent filter based on given filter spec.
|
Installs a persistent filter based on given filter spec.
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -3021,7 +3022,7 @@ Response: `"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"`
|
|||||||
Installs a persistent filter to notify when new messages arrive in the message pool.
|
Installs a persistent filter to notify when new messages arrive in the message pool.
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs: `null`
|
Inputs: `null`
|
||||||
|
|
||||||
@ -3060,7 +3061,7 @@ params contains additional parameters used with the log event type
|
|||||||
The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
|
The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -3071,11 +3072,20 @@ Inputs:
|
|||||||
|
|
||||||
Response: `"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"`
|
Response: `"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"`
|
||||||
|
|
||||||
|
### EthSyncing
|
||||||
|
|
||||||
|
|
||||||
|
Perms: read
|
||||||
|
|
||||||
|
Inputs: `null`
|
||||||
|
|
||||||
|
Response: `false`
|
||||||
|
|
||||||
### EthUninstallFilter
|
### EthUninstallFilter
|
||||||
Uninstalls a filter with given id.
|
Uninstalls a filter with given id.
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -3090,7 +3100,7 @@ Response: `true`
|
|||||||
Unsubscribe from a websocket subscription
|
Unsubscribe from a websocket subscription
|
||||||
|
|
||||||
|
|
||||||
Perms: write
|
Perms: read
|
||||||
|
|
||||||
Inputs:
|
Inputs:
|
||||||
```json
|
```json
|
||||||
@ -5566,7 +5576,7 @@ Inputs:
|
|||||||
Response:
|
Response:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"Channel": "\u003cempty\u003e",
|
"Channel": "f01234",
|
||||||
"From": "f01234",
|
"From": "f01234",
|
||||||
"To": "f01234",
|
"To": "f01234",
|
||||||
"ConfirmedAmt": "0",
|
"ConfirmedAmt": "0",
|
||||||
@ -5597,7 +5607,7 @@ Inputs:
|
|||||||
Response:
|
Response:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"Channel": "\u003cempty\u003e",
|
"Channel": "f01234",
|
||||||
"From": "f01234",
|
"From": "f01234",
|
||||||
"To": "f01234",
|
"To": "f01234",
|
||||||
"ConfirmedAmt": "0",
|
"ConfirmedAmt": "0",
|
||||||
@ -6380,7 +6390,7 @@ Response:
|
|||||||
},
|
},
|
||||||
"Nonce": 42,
|
"Nonce": 42,
|
||||||
"Balance": "0",
|
"Balance": "0",
|
||||||
"Address": "\u003cempty\u003e"
|
"Address": "f01234"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@ -6720,7 +6730,7 @@ Response:
|
|||||||
},
|
},
|
||||||
"Nonce": 42,
|
"Nonce": 42,
|
||||||
"Balance": "0",
|
"Balance": "0",
|
||||||
"Address": "\u003cempty\u003e"
|
"Address": "f01234"
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -6932,7 +6942,6 @@ Response:
|
|||||||
"UpgradeRefuelHeight": 10101,
|
"UpgradeRefuelHeight": 10101,
|
||||||
"UpgradeTapeHeight": 10101,
|
"UpgradeTapeHeight": 10101,
|
||||||
"UpgradeKumquatHeight": 10101,
|
"UpgradeKumquatHeight": 10101,
|
||||||
"UpgradePriceListOopsHeight": 10101,
|
|
||||||
"BreezeGasTampingDuration": 10101,
|
"BreezeGasTampingDuration": 10101,
|
||||||
"UpgradeCalicoHeight": 10101,
|
"UpgradeCalicoHeight": 10101,
|
||||||
"UpgradePersianHeight": 10101,
|
"UpgradePersianHeight": 10101,
|
||||||
@ -7496,6 +7505,7 @@ Response:
|
|||||||
"SectorSize": 34359738368,
|
"SectorSize": 34359738368,
|
||||||
"WindowPoStPartitionSectors": 42,
|
"WindowPoStPartitionSectors": 42,
|
||||||
"ConsensusFaultElapsed": 10101,
|
"ConsensusFaultElapsed": 10101,
|
||||||
|
"PendingOwnerAddress": "f01234",
|
||||||
"Beneficiary": "f01234",
|
"Beneficiary": "f01234",
|
||||||
"BeneficiaryTerm": {
|
"BeneficiaryTerm": {
|
||||||
"Quota": "0",
|
"Quota": "0",
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -7,7 +7,7 @@ USAGE:
|
|||||||
lotus-worker [global options] command [command options] [arguments...]
|
lotus-worker [global options] command [command options] [arguments...]
|
||||||
|
|
||||||
VERSION:
|
VERSION:
|
||||||
1.23.2
|
1.23.3
|
||||||
|
|
||||||
COMMANDS:
|
COMMANDS:
|
||||||
run Start lotus worker
|
run Start lotus worker
|
||||||
@ -19,12 +19,11 @@ COMMANDS:
|
|||||||
help, h Shows a list of commands or help for one command
|
help, h Shows a list of commands or help for one command
|
||||||
|
|
||||||
GLOBAL OPTIONS:
|
GLOBAL OPTIONS:
|
||||||
--enable-gpu-proving enable use of GPU for mining operations (default: true) [$LOTUS_WORKER_ENABLE_GPU_PROVING]
|
|
||||||
--help, -h show help (default: false)
|
|
||||||
--miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
|
|
||||||
--version, -v print the version (default: false)
|
|
||||||
--worker-repo value, --workerrepo value Specify worker repo path. flag workerrepo and env WORKER_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusworker") [$LOTUS_WORKER_PATH, $WORKER_PATH]
|
--worker-repo value, --workerrepo value Specify worker repo path. flag workerrepo and env WORKER_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusworker") [$LOTUS_WORKER_PATH, $WORKER_PATH]
|
||||||
|
--miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
|
||||||
|
--enable-gpu-proving enable use of GPU for mining operations (default: true) [$LOTUS_WORKER_ENABLE_GPU_PROVING]
|
||||||
|
--help, -h show help
|
||||||
|
--version, -v print the version
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker run
|
## lotus-worker run
|
||||||
@ -36,28 +35,29 @@ USAGE:
|
|||||||
lotus-worker run [command options] [arguments...]
|
lotus-worker run [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--addpiece enable addpiece (default: true) [$LOTUS_WORKER_ADDPIECE]
|
|
||||||
--commit enable commit (default: true) [$LOTUS_WORKER_COMMIT]
|
|
||||||
--http-server-timeout value (default: "30s")
|
|
||||||
--listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") [$LOTUS_WORKER_LISTEN]
|
--listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") [$LOTUS_WORKER_LISTEN]
|
||||||
--name value custom worker name (default: hostname) [$LOTUS_WORKER_NAME]
|
|
||||||
--no-default disable all default compute tasks, use the worker for storage/fetching only (default: false) [$LOTUS_WORKER_NO_DEFAULT]
|
|
||||||
--no-local-storage don't use storageminer repo for sector storage (default: false) [$LOTUS_WORKER_NO_LOCAL_STORAGE]
|
--no-local-storage don't use storageminer repo for sector storage (default: false) [$LOTUS_WORKER_NO_LOCAL_STORAGE]
|
||||||
--no-swap don't use swap (default: false) [$LOTUS_WORKER_NO_SWAP]
|
--no-swap don't use swap (default: false) [$LOTUS_WORKER_NO_SWAP]
|
||||||
|
--name value custom worker name (default: hostname) [$LOTUS_WORKER_NAME]
|
||||||
|
--addpiece enable addpiece (default: true) [$LOTUS_WORKER_ADDPIECE]
|
||||||
|
--precommit1 enable precommit1 (default: true) [$LOTUS_WORKER_PRECOMMIT1]
|
||||||
|
--unseal enable unsealing (default: true) [$LOTUS_WORKER_UNSEAL]
|
||||||
|
--precommit2 enable precommit2 (default: true) [$LOTUS_WORKER_PRECOMMIT2]
|
||||||
|
--commit enable commit (default: true) [$LOTUS_WORKER_COMMIT]
|
||||||
|
--replica-update enable replica update (default: true) [$LOTUS_WORKER_REPLICA_UPDATE]
|
||||||
|
--prove-replica-update2 enable prove replica update 2 (default: true) [$LOTUS_WORKER_PROVE_REPLICA_UPDATE2]
|
||||||
|
--regen-sector-key enable regen sector key (default: true) [$LOTUS_WORKER_REGEN_SECTOR_KEY]
|
||||||
|
--sector-download enable external sector data download (default: false) [$LOTUS_WORKER_SECTOR_DOWNLOAD]
|
||||||
|
--windowpost enable window post (default: false) [$LOTUS_WORKER_WINDOWPOST]
|
||||||
|
--winningpost enable winning post (default: false) [$LOTUS_WORKER_WINNINGPOST]
|
||||||
|
--no-default disable all default compute tasks, use the worker for storage/fetching only (default: false) [$LOTUS_WORKER_NO_DEFAULT]
|
||||||
--parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) [$LOTUS_WORKER_PARALLEL_FETCH_LIMIT]
|
--parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) [$LOTUS_WORKER_PARALLEL_FETCH_LIMIT]
|
||||||
--post-parallel-reads value maximum number of parallel challenge reads (0 = no limit) (default: 32) [$LOTUS_WORKER_POST_PARALLEL_READS]
|
--post-parallel-reads value maximum number of parallel challenge reads (0 = no limit) (default: 32) [$LOTUS_WORKER_POST_PARALLEL_READS]
|
||||||
--post-read-timeout value time limit for reading PoSt challenges (0 = no limit) (default: 0s) [$LOTUS_WORKER_POST_READ_TIMEOUT]
|
--post-read-timeout value time limit for reading PoSt challenges (0 = no limit) (default: 0s) [$LOTUS_WORKER_POST_READ_TIMEOUT]
|
||||||
--precommit1 enable precommit1 (default: true) [$LOTUS_WORKER_PRECOMMIT1]
|
|
||||||
--precommit2 enable precommit2 (default: true) [$LOTUS_WORKER_PRECOMMIT2]
|
|
||||||
--prove-replica-update2 enable prove replica update 2 (default: true) [$LOTUS_WORKER_PROVE_REPLICA_UPDATE2]
|
|
||||||
--regen-sector-key enable regen sector key (default: true) [$LOTUS_WORKER_REGEN_SECTOR_KEY]
|
|
||||||
--replica-update enable replica update (default: true) [$LOTUS_WORKER_REPLICA_UPDATE]
|
|
||||||
--sector-download enable external sector data download (default: false) [$LOTUS_WORKER_SECTOR_DOWNLOAD]
|
|
||||||
--timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") [$LOTUS_WORKER_TIMEOUT]
|
--timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") [$LOTUS_WORKER_TIMEOUT]
|
||||||
--unseal enable unsealing (default: true) [$LOTUS_WORKER_UNSEAL]
|
--http-server-timeout value (default: "30s")
|
||||||
--windowpost enable window post (default: false) [$LOTUS_WORKER_WINDOWPOST]
|
--data-cid Run the data-cid task. true|false (default: inherits --addpiece)
|
||||||
--winningpost enable winning post (default: false) [$LOTUS_WORKER_WINNINGPOST]
|
--help, -h show help
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker stop
|
## lotus-worker stop
|
||||||
@ -69,8 +69,7 @@ USAGE:
|
|||||||
lotus-worker stop [command options] [arguments...]
|
lotus-worker stop [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--help, -h show help (default: false)
|
--help, -h show help
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker info
|
## lotus-worker info
|
||||||
@ -82,8 +81,7 @@ USAGE:
|
|||||||
lotus-worker info [command options] [arguments...]
|
lotus-worker info [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--help, -h show help (default: false)
|
--help, -h show help
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker storage
|
## lotus-worker storage
|
||||||
@ -95,14 +93,13 @@ USAGE:
|
|||||||
lotus-worker storage command [command options] [arguments...]
|
lotus-worker storage command [command options] [arguments...]
|
||||||
|
|
||||||
COMMANDS:
|
COMMANDS:
|
||||||
attach attach local storage path
|
attach attach local storage path
|
||||||
detach detach local storage path
|
detach detach local storage path
|
||||||
redeclare redeclare sectors in a local storage path
|
redeclare redeclare sectors in a local storage path
|
||||||
help, h Shows a list of commands or help for one command
|
help, h Shows a list of commands or help for one command
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--help, -h show help (default: false)
|
--help, -h show help
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### lotus-worker storage attach
|
### lotus-worker storage attach
|
||||||
@ -114,14 +111,14 @@ USAGE:
|
|||||||
lotus-worker storage attach [command options] [arguments...]
|
lotus-worker storage attach [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--allow-to value [ --allow-to value ] path groups allowed to pull data from this path (allow all if not specified)
|
|
||||||
--groups value [ --groups value ] path group names
|
|
||||||
--init initialize the path first (default: false)
|
--init initialize the path first (default: false)
|
||||||
--max-storage value (for init) limit storage space for sectors (expensive for very large paths!)
|
--weight value (for init) path weight (default: 10)
|
||||||
--seal (for init) use path for sealing (default: false)
|
--seal (for init) use path for sealing (default: false)
|
||||||
--store (for init) use path for long-term storage (default: false)
|
--store (for init) use path for long-term storage (default: false)
|
||||||
--weight value (for init) path weight (default: 10)
|
--max-storage value (for init) limit storage space for sectors (expensive for very large paths!)
|
||||||
|
--groups value [ --groups value ] path group names
|
||||||
|
--allow-to value [ --allow-to value ] path groups allowed to pull data from this path (allow all if not specified)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
|
||||||
### lotus-worker storage detach
|
### lotus-worker storage detach
|
||||||
@ -134,7 +131,7 @@ USAGE:
|
|||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--really-do-it (default: false)
|
--really-do-it (default: false)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
|
||||||
### lotus-worker storage redeclare
|
### lotus-worker storage redeclare
|
||||||
@ -146,10 +143,10 @@ USAGE:
|
|||||||
lotus-worker storage redeclare [command options] [arguments...]
|
lotus-worker storage redeclare [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--all redeclare all storage paths (default: false)
|
|
||||||
--drop-missing Drop index entries with missing files (default: false)
|
|
||||||
--id value storage path ID
|
--id value storage path ID
|
||||||
|
--all redeclare all storage paths (default: false)
|
||||||
|
--drop-missing Drop index entries with missing files (default: true)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker resources
|
## lotus-worker resources
|
||||||
@ -161,9 +158,9 @@ USAGE:
|
|||||||
lotus-worker resources [command options] [arguments...]
|
lotus-worker resources [command options] [arguments...]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--all print all resource envvars (default: false)
|
--all print all resource envvars (default: false)
|
||||||
--default print default resource envvars (default: false)
|
--default print default resource envvars (default: false)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
|
||||||
## lotus-worker tasks
|
## lotus-worker tasks
|
||||||
@ -175,13 +172,12 @@ USAGE:
|
|||||||
lotus-worker tasks command [command options] [arguments...]
|
lotus-worker tasks command [command options] [arguments...]
|
||||||
|
|
||||||
COMMANDS:
|
COMMANDS:
|
||||||
enable Enable a task type
|
enable Enable a task type
|
||||||
disable Disable a task type
|
disable Disable a task type
|
||||||
help, h Shows a list of commands or help for one command
|
help, h Shows a list of commands or help for one command
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--help, -h show help (default: false)
|
--help, -h show help
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### lotus-worker tasks enable
|
### lotus-worker tasks enable
|
||||||
@ -193,8 +189,8 @@ USAGE:
|
|||||||
lotus-worker tasks enable [command options] --all | [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
|
lotus-worker tasks enable [command options] --all | [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--all Enable all task types (default: false)
|
--all Enable all task types (default: false)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
|
||||||
### lotus-worker tasks disable
|
### lotus-worker tasks disable
|
||||||
@ -206,6 +202,6 @@ USAGE:
|
|||||||
lotus-worker tasks disable [command options] --all | [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
|
lotus-worker tasks disable [command options] --all | [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
--all Disable all task types (default: false)
|
--all Disable all task types (default: false)
|
||||||
|
--help, -h show help
|
||||||
```
|
```
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -38,7 +38,7 @@
|
|||||||
#
|
#
|
||||||
# type: []string
|
# type: []string
|
||||||
# env var: LOTUS_LIBP2P_LISTENADDRESSES
|
# env var: LOTUS_LIBP2P_LISTENADDRESSES
|
||||||
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"]
|
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/udp/0/quic-v1/webtransport"]
|
||||||
|
|
||||||
# Addresses to explicitally announce to other peers. If not specified,
|
# Addresses to explicitally announce to other peers. If not specified,
|
||||||
# all interface addresses are announced
|
# all interface addresses are announced
|
||||||
@ -399,3 +399,32 @@
|
|||||||
#EnableMsgIndex = false
|
#EnableMsgIndex = false
|
||||||
|
|
||||||
|
|
||||||
|
[FaultReporter]
|
||||||
|
# EnableConsensusFaultReporter controls whether the node will monitor and
|
||||||
|
# report consensus faults. When enabled, the node will watch for malicious
|
||||||
|
# behaviors like double-mining and parent grinding, and submit reports to the
|
||||||
|
# network. This can earn reporter rewards, but is not guaranteed. Nodes should
|
||||||
|
# enable fault reporting with care, as it may increase resource usage, and may
|
||||||
|
# generate gas fees without earning rewards.
|
||||||
|
#
|
||||||
|
# type: bool
|
||||||
|
# env var: LOTUS_FAULTREPORTER_ENABLECONSENSUSFAULTREPORTER
|
||||||
|
#EnableConsensusFaultReporter = false
|
||||||
|
|
||||||
|
# ConsensusFaultReporterDataDir is the path where fault reporter state will be
|
||||||
|
# persisted. This directory should have adequate space and permissions for the
|
||||||
|
# node process.
|
||||||
|
#
|
||||||
|
# type: string
|
||||||
|
# env var: LOTUS_FAULTREPORTER_CONSENSUSFAULTREPORTERDATADIR
|
||||||
|
#ConsensusFaultReporterDataDir = ""
|
||||||
|
|
||||||
|
# ConsensusFaultReporterAddress is the wallet address used for submitting
|
||||||
|
# ReportConsensusFault messages. It will pay for gas fees, and receive any
|
||||||
|
# rewards. This address should have adequate funds to cover gas fees.
|
||||||
|
#
|
||||||
|
# type: string
|
||||||
|
# env var: LOTUS_FAULTREPORTER_CONSENSUSFAULTREPORTERADDRESS
|
||||||
|
#ConsensusFaultReporterAddress = ""
|
||||||
|
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@
|
|||||||
#
|
#
|
||||||
# type: []string
|
# type: []string
|
||||||
# env var: LOTUS_LIBP2P_LISTENADDRESSES
|
# env var: LOTUS_LIBP2P_LISTENADDRESSES
|
||||||
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0"]
|
#ListenAddresses = ["/ip4/0.0.0.0/tcp/0", "/ip6/::/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/udp/0/quic-v1/webtransport"]
|
||||||
|
|
||||||
# Addresses to explicitally announce to other peers. If not specified,
|
# Addresses to explicitally announce to other peers. If not specified,
|
||||||
# all interface addresses are announced
|
# all interface addresses are announced
|
||||||
@ -515,15 +515,7 @@
|
|||||||
# env var: LOTUS_SEALING_MINUPGRADESECTOREXPIRATION
|
# env var: LOTUS_SEALING_MINUPGRADESECTOREXPIRATION
|
||||||
#MinUpgradeSectorExpiration = 0
|
#MinUpgradeSectorExpiration = 0
|
||||||
|
|
||||||
# When set to a non-zero value, minimum number of epochs until sector expiration above which upgrade candidates will
|
# DEPRECATED: Target expiration is no longer used
|
||||||
# be selected based on lowest initial pledge.
|
|
||||||
#
|
|
||||||
# Target sector expiration is calculated by looking at the input deal queue, sorting it by deal expiration, and
|
|
||||||
# selecting N deals from the queue up to sector size. The target expiration will be Nth deal end epoch, or in case
|
|
||||||
# where there weren't enough deals to fill a sector, DealMaxDuration (540 days = 1555200 epochs)
|
|
||||||
#
|
|
||||||
# Setting this to a high value (for example to maximum deal duration - 1555200) will disable selection based on
|
|
||||||
# initial pledge - upgrade sectors will always be chosen based on longest expiration
|
|
||||||
#
|
#
|
||||||
# type: uint64
|
# type: uint64
|
||||||
# env var: LOTUS_SEALING_MINTARGETUPGRADESECTOREXPIRATION
|
# env var: LOTUS_SEALING_MINTARGETUPGRADESECTOREXPIRATION
|
||||||
|
@ -2,8 +2,8 @@
|
|||||||
|
|
||||||
The gas balancing process targets to set gas costs of syscalls to be in line with
|
The gas balancing process targets to set gas costs of syscalls to be in line with
|
||||||
10 gas per nanosecond on reference hardware.
|
10 gas per nanosecond on reference hardware.
|
||||||
The process can be either performed for all syscalls based on existing messages and chain or targeted
|
The process can be either performed for all syscalls based on existing messages and chains or targeted
|
||||||
at single syscall.
|
at a single syscall.
|
||||||
|
|
||||||
#### Reference hardware
|
#### Reference hardware
|
||||||
|
|
||||||
@ -12,14 +12,14 @@ may be subject to change.
|
|||||||
|
|
||||||
### Complete gas balancing
|
### Complete gas balancing
|
||||||
|
|
||||||
Complete gas balancing is performed using `lotus-bench` the process is based on importing a chain export
|
Complete gas balancing is performed using a `lotus-bench` the process is based on importing a chain export
|
||||||
and collecting gas traces which are later aggregated.
|
and collecting gas traces which are later aggregated.
|
||||||
|
|
||||||
Before building `lotus-bench` make sure `EnableDetailedTracing` in `chain/vm/runtime.go` is set to `true`.
|
Before building `lotus-bench` make sure `EnableDetailedTracing` in `chain/vm/runtime.go` is set to `true`.
|
||||||
|
|
||||||
The process can be started using `./lotus-bench import` with `--car` flag set to the location of
|
The process can be started using `./lotus-bench import` with `--car` flag set to the location of
|
||||||
CAR chain export. `--start-epoch` and `--end-epoch` can be used to to limit the range of epochs to run
|
CAR chain export. `--start-epoch` and `--end-epoch` can be used to limit the range of epochs to run
|
||||||
the benchmark. Note that state tree of `start-epoch` needs to be in the CAR file or has to be previously computed
|
the benchmark. Note that the state tree of `start-epoch` needs to be in the CAR file or has to be previously computed
|
||||||
to work.
|
to work.
|
||||||
|
|
||||||
The output will be a `bench.json` file containing information about every syscall invoked
|
The output will be a `bench.json` file containing information about every syscall invoked
|
||||||
@ -29,7 +29,7 @@ spare space.
|
|||||||
After the bench run is complete the `bench.json` file can be analyzed with `./lotus-bench import analyze bench.json`.
|
After the bench run is complete the `bench.json` file can be analyzed with `./lotus-bench import analyze bench.json`.
|
||||||
|
|
||||||
It will compute means, standard deviations and co-variances (when applicable) of syscall runtimes.
|
It will compute means, standard deviations and co-variances (when applicable) of syscall runtimes.
|
||||||
The output is in nanoseconds, so the gas values for syscalls should be 10x that. In cases where co-variance of
|
The output is in nanoseconds, so the gas values for syscalls should be 10x that. In cases where the co-variance of
|
||||||
execution time to some parameter is evaluated, the strength of the correlation should be taken into account.
|
execution time to some parameter is evaluated, the strength of the correlation should be taken into account.
|
||||||
|
|
||||||
#### Special cases
|
#### Special cases
|
||||||
@ -40,15 +40,15 @@ during block execution (when gas traces are formed) objects are only written to
|
|||||||
|
|
||||||
### Targeted gas balancing
|
### Targeted gas balancing
|
||||||
|
|
||||||
In some cases complete gas balancing is infeasible, either new syscall gets introduced or
|
In some cases complete gas balancing is infeasible, either a new syscall gets introduced or
|
||||||
complete balancing is too time consuming.
|
complete balancing is too time consuming.
|
||||||
|
|
||||||
In these cases the recommended way to estimate gas for given syscall is to perform an `in-vivo` benchmark.
|
In these cases, the recommended way to estimate gas for a given syscall is to perform an `in-vivo` benchmark.
|
||||||
In the past `in-vitro` as in standalone benchmarks were found to be highly inaccurate when compared to results
|
In the past `in-vitro` as in standalone benchmarks were found to be highly inaccurate when compared to results
|
||||||
of real execution.
|
of real execution.
|
||||||
|
|
||||||
A in-vivo benchmark can be performed by running an example of such syscall during block execution.
|
An in-vivo benchmark can be performed by running an example of such a syscall during block execution.
|
||||||
The best place to hook-in such benchmark is message execution loop in
|
The best place to hook-in such a benchmark is the message execution loop in
|
||||||
`chain/stmgr/stmgr.go` in `ApplyBlocks()`. Depending of time required to complete the syscall it might be
|
`chain/stmgr/stmgr.go` in `ApplyBlocks()`. Depending on the time required to complete the syscall it might be
|
||||||
advisable to run the execution only once every few messages.
|
advisable to run the execution only once every few messages.
|
||||||
|
|
||||||
|
2
extern/filecoin-ffi
vendored
2
extern/filecoin-ffi
vendored
@ -1 +1 @@
|
|||||||
Subproject commit de34caff946d598edb299566d951b44b9b7f7dd4
|
Subproject commit a458f638e3c8603c9b5a9ed9847c3af4597e46d4
|
@ -43,6 +43,9 @@ const (
|
|||||||
// TargetAPI defines the API methods that the Node depends on
|
// TargetAPI defines the API methods that the Node depends on
|
||||||
// (to make it easy to mock for tests)
|
// (to make it easy to mock for tests)
|
||||||
type TargetAPI interface {
|
type TargetAPI interface {
|
||||||
|
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
|
||||||
|
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
|
||||||
|
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error)
|
||||||
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
||||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
|
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
|
||||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
|
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
|
||||||
@ -107,14 +110,15 @@ type TargetAPI interface {
|
|||||||
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
|
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
|
||||||
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
|
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
|
||||||
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
|
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
|
||||||
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
|
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error)
|
||||||
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error)
|
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error)
|
||||||
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
|
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
|
||||||
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
|
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
|
||||||
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
|
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
|
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
|
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error)
|
||||||
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
|
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
|
||||||
|
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error)
|
||||||
NetVersion(ctx context.Context) (string, error)
|
NetVersion(ctx context.Context) (string, error)
|
||||||
NetListening(ctx context.Context) (bool, error)
|
NetListening(ctx context.Context) (bool, error)
|
||||||
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
|
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
|
||||||
@ -122,7 +126,7 @@ type TargetAPI interface {
|
|||||||
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
|
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
|
||||||
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
|
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
|
||||||
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
|
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
|
||||||
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error)
|
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
|
||||||
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
|
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
|
||||||
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
|
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
|
||||||
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user