Merge branch 'master' into lotus-fountain-add-datacap

JesseXie 2023-05-12 10:42:39 -07:00 committed by GitHub
commit 9b0cbbb0a8
215 changed files with 4633 additions and 2318 deletions

View File

@ -63,7 +63,7 @@ commands:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: make deps lotus
- run: make deps
download-params:
steps:
- restore_cache:
@ -304,9 +304,7 @@ jobs:
darwin: true
darwin-architecture: arm64
- run: |
export CPATH=$(brew --prefix)/include
export LIBRARY_PATH=$(brew --prefix)/lib
make lotus lotus-miner lotus-worker
export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
@ -812,6 +810,12 @@ workflows:
- build
suite: itest-mpool_push_with_uuid
target: "./itests/mpool_push_with_uuid_test.go"
- test:
name: test-itest-msgindex
requires:
- build
suite: itest-msgindex
target: "./itests/msgindex_test.go"
- test:
name: test-itest-multisig
requires:
@ -878,6 +882,12 @@ workflows:
- build
suite: itest-sdr_upgrade
target: "./itests/sdr_upgrade_test.go"
- test:
name: test-itest-sealing_resources
requires:
- build
suite: itest-sealing_resources
target: "./itests/sealing_resources_test.go"
- test:
name: test-itest-sector_finalize_early
requires:
@ -1063,6 +1073,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
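The added /^ci\/.*$/ filter lets these release-only workflows also run on ci/-prefixed branches. CircleCI applies its own regex engine, but the anchoring behaviour can be illustrated with a quick Go sketch (branch names here are hypothetical):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	release := regexp.MustCompile(`^release/v\d+\.\d+\.\d+(-rc\d+)?$`)
	ci := regexp.MustCompile(`^ci/.*$`)
	fmt.Println(release.MatchString("release/v1.23.2-rc1")) // true
	fmt.Println(ci.MatchString("ci/test-macos-runner"))     // true (hypothetical branch)
	fmt.Println(ci.MatchString("feature/ci-tweaks"))        // false: pattern is anchored at ^
}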
@ -1072,6 +1083,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -1081,6 +1093,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -1093,7 +1106,7 @@ workflows:
filters:
branches:
ignore:
- /.*/
- /^.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -1108,6 +1121,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
- build-docker:
name: "Docker push (lotus-all-in-one / stable / mainnet)"
image: lotus-all-in-one

View File

@ -63,7 +63,7 @@ commands:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: make deps lotus
- run: make deps
download-params:
steps:
- restore_cache:
@ -304,9 +304,7 @@ jobs:
darwin: true
darwin-architecture: arm64
- run: |
export CPATH=$(brew --prefix)/include
export LIBRARY_PATH=$(brew --prefix)/lib
make lotus lotus-miner lotus-worker
export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
@ -583,6 +581,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -592,6 +591,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -601,6 +601,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -613,7 +614,7 @@ workflows:
filters:
branches:
ignore:
- /.*/
- /^.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
@ -628,6 +629,7 @@ workflows:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
[[- range .Networks]]
- build-docker:
name: "Docker push (lotus-all-in-one / stable / [[.]])"

.gitignore (1 line changed)
View File

@ -52,3 +52,4 @@ dist/
# The following files are checked into git and result
# in dirty git state if removed from the docker context
!extern/filecoin-ffi/rust/filecoin.pc
!extern/test-vectors

View File

@ -1,5 +1,11 @@
# Lotus changelog
# UNRELEASED
## New features
- feat: Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
- If unset, we default to caching the 16 most recent execution traces. Node operators may want to set this to 0, while exchanges may want to crank it up.
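A minimal sketch of how a node might read this knob at startup; the variable name comes from the changelog entry above, while the helper itself is hypothetical rather than lotus's actual code:

package config

import (
	"fmt"
	"os"
	"strconv"
)

// ExecTraceCacheSize reads LOTUS_EXEC_TRACE_CACHE_SIZE; unset means the
// default of the 16 most recent traces, and 0 disables the cache entirely.
func ExecTraceCacheSize() (int, error) {
	v := os.Getenv("LOTUS_EXEC_TRACE_CACHE_SIZE")
	if v == "" {
		return 16, nil
	}
	n, err := strconv.Atoi(v)
	if err != nil || n < 0 {
		return 0, fmt.Errorf("invalid LOTUS_EXEC_TRACE_CACHE_SIZE %q", v)
	}
	return n, nil
}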
# v1.23.0 / 2023-04-21
This is the stable feature release for the upcoming MANDATORY network upgrade at `2023-04-27T13:00:00Z`, epoch `2809800`. This feature release delivers the nv19 Lightning and nv20 Thunder network upgrades for mainnet, and includes numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.
@ -606,10 +612,6 @@ verifiedregistry bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a
### Dependencies
github.com/filecoin-project/go-state-types (v0.11.0-rc1 -> v0.11.1):
<<<<<<< HEAD
=======
>>>>>>> releases
# v1.20.4 / 2023-03-17
This is a patch release intended to alleviate performance issues reported by some users since the nv18 upgrade.

View File

@ -1,273 +0,0 @@
##### DEPRECATED
FROM golang:1.19.7-buster AS builder-deps
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
ENV XDG_CACHE_HOME="/tmp"
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
RUST_VERSION=1.63.0
RUN set -eux; \
dpkgArch="$(dpkg --print-architecture)"; \
case "${dpkgArch##*-}" in \
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
esac; \
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
wget "$url"; \
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
### end rust
FROM builder-deps AS builder-local
MAINTAINER Lotus Development Team
COPY ./ /opt/filecoin
WORKDIR /opt/filecoin
### make configurable filecoin-ffi build
ARG FFI_BUILD_FROM_SOURCE=0
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
RUN make clean deps
FROM builder-local AS builder-test
MAINTAINER Lotus Development Team
WORKDIR /opt/filecoin
RUN make debug
FROM builder-local AS builder
MAINTAINER Lotus Development Team
WORKDIR /opt/filecoin
ARG RUSTFLAGS=""
ARG GOFLAGS=""
RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats
FROM ubuntu:20.04 AS base
MAINTAINER Lotus Development Team
# Base resources
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
COPY --from=builder /lib/*/libdl.so.2 /lib/
COPY --from=builder /lib/*/librt.so.1 /lib/
COPY --from=builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=builder /lib/*/libutil.so.1 /lib/
COPY --from=builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=builder /usr/lib/*/libhwloc.so.5 /lib/
COPY --from=builder /usr/lib/*/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc \
&& mkdir -p /etc/OpenCL/vendors \
&& echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
###
FROM base AS lotus
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY scripts/docker-lotus-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
ENV DOCKER_LOTUS_IMPORT_WALLET ""
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/tmp/filecoin-proof-parameters
USER fc
EXPOSE 1234
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
CMD ["-help"]
###
FROM base AS lotus-wallet
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
ENV WALLET_PATH /var/lib/lotus-wallet
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/lib/lotus-wallet
USER fc
EXPOSE 1777
ENTRYPOINT ["/usr/local/bin/lotus-wallet"]
CMD ["-help"]
###
FROM base AS lotus-gateway
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
USER fc
EXPOSE 1234
ENTRYPOINT ["/usr/local/bin/lotus-gateway"]
CMD ["-help"]
###
FROM base AS lotus-miner
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY scripts/docker-lotus-miner-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus-miner
VOLUME /var/tmp/filecoin-proof-parameters
USER fc
EXPOSE 2345
ENTRYPOINT ["/docker-lotus-miner-entrypoint.sh"]
CMD ["-help"]
###
FROM base AS lotus-worker
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-worker
VOLUME /var/lib/lotus-worker
USER fc
EXPOSE 3456
ENTRYPOINT ["/usr/local/bin/lotus-worker"]
CMD ["-help"]
###
from base as lotus-all-in-one
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV WALLET_PATH /var/lib/lotus-wallet
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
EXPOSE 1234
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777
###
from base as lotus-test
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV WALLET_PATH /var/lib/lotus-wallet
COPY --from=builder-test /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=builder-test /opt/filecoin/lotus-seed /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
EXPOSE 1234
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777

View File

@ -7,8 +7,8 @@ import (
"time"
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
@ -297,8 +297,10 @@ type FullNode interface {
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool
MpoolClear(context.Context, bool) error //perm:write
// MpoolClear clears pending messages from the mpool.
// If clearLocal is true, ALL messages will be cleared.
// If clearLocal is false, local messages will be protected, all others will be cleared.
MpoolClear(ctx context.Context, clearLocal bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
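A minimal usage sketch against the clarified MpoolClear interface above (assumes an already-connected api.FullNode client; connection setup is elided):

package example

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// clearPending wipes the message pool: clearLocal=true removes ALL pending
// messages, clearLocal=false protects locally-published ones.
func clearPending(ctx context.Context, node api.FullNode, clearLocal bool) error {
	return node.MpoolClear(ctx, clearLocal)
}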
@ -810,6 +812,7 @@ type FullNode interface {
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) //perm:read
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) //perm:read
EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) //perm:read
NetVersion(ctx context.Context) (string, error) //perm:read
NetListening(ctx context.Context) (bool, error) //perm:read
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
@ -827,23 +830,23 @@ type FullNode interface {
// Polling method for a filter, returns event logs which occurred since last poll.
// (requires write perm since timestamp of last filter execution will be written)
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
// Returns event logs matching filter with given id.
// (requires write perm since timestamp of last filter execution will be written)
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
// Installs a persistent filter based on given filter spec.
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:write
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:read
// Installs a persistent filter to notify when a new block arrives.
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
// Installs a persistent filter to notify when new messages arrive in the message pool.
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
// Uninstalls a filter with given id.
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:write
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:read
// Subscribe to different event types using websockets
// eventTypes is one or more of:
@ -852,10 +855,10 @@ type FullNode interface {
// - logs: notify new event logs that match a criteria
// params contains additional parameters used with the log event type
// The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:write
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:read
// Unsubscribe from a websocket subscription
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:write
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:read
// Returns the client version
Web3ClientVersion(ctx context.Context) (string, error) //perm:read

View File

@ -3,8 +3,8 @@ package api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
@ -33,6 +33,9 @@ import (
// * Generate openrpc blobs
type Gateway interface {
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
ChainHead(ctx context.Context) (*types.TipSet, error)
@ -95,6 +98,7 @@ type Gateway interface {
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error)
NetVersion(ctx context.Context) (string, error)
NetListening(ctx context.Context) (bool, error)
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)

View File

@ -73,5 +73,5 @@ type CommonNet interface {
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
PublicAddrs []string
}

View File

@ -129,6 +129,8 @@ type StorageMiner interface {
SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
// SectorUnseal unseals the provided sector
SectorUnseal(ctx context.Context, number abi.SectorNumber) error //perm:admin
// SectorNumAssignerMeta returns sector number assigner metadata - reserved/allocated
SectorNumAssignerMeta(ctx context.Context) (NumAssignerMeta, error) //perm:read
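A sketch of invoking the new SectorUnseal endpoint; it assumes a connected api.StorageMiner client with admin permissions:

package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// unsealSector asks the miner to unseal one sector via the admin API above.
func unsealSector(ctx context.Context, miner api.StorageMiner, num abi.SectorNumber) error {
	return miner.SectorUnseal(ctx, num) // perm:admin
}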

View File

@ -14,9 +14,9 @@ import (
"unicode"
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
blocks "github.com/ipfs/go-libipfs/blocks"
textselector "github.com/ipld/go-ipld-selector-text-lite"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/metrics"

View File

@ -21,6 +21,7 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt")
as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance")
as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
as.AliasMethod("eth_syncing", "Filecoin.EthSyncing")
as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory")
as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion")
as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas")

View File

@ -12,8 +12,8 @@ import (
gomock "github.com/golang/mock/gomock"
uuid "github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
metrics "github.com/libp2p/go-libp2p/core/metrics"
network0 "github.com/libp2p/go-libp2p/core/network"
peer "github.com/libp2p/go-libp2p/core/peer"
@ -1476,6 +1476,21 @@ func (mr *MockFullNodeMockRecorder) EthSubscribe(arg0, arg1 interface{}) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSubscribe", reflect.TypeOf((*MockFullNode)(nil).EthSubscribe), arg0, arg1)
}
// EthSyncing mocks base method.
func (m *MockFullNode) EthSyncing(arg0 context.Context) (ethtypes.EthSyncingResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthSyncing", arg0)
ret0, _ := ret[0].(ethtypes.EthSyncingResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EthSyncing indicates an expected call of EthSyncing.
func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
}
// EthUninstallFilter mocks base method.
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
m.ctrl.T.Helper()

View File

@ -8,8 +8,8 @@ import (
"time"
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
@ -274,9 +274,9 @@ type FullNodeMethods struct {
EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) `perm:"read"`
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"`
EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"`
EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"`
EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"`
EthGetLogs func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) `perm:"read"`
@ -302,21 +302,23 @@ type FullNodeMethods struct {
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"`
EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"write"`
EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"read"`
EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"`
EthProtocolVersion func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
EthSendRawTransaction func(p0 context.Context, p1 ethtypes.EthBytes) (ethtypes.EthHash, error) `perm:"read"`
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"write"`
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"read"`
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"write"`
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"write"`
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
FilecoinAddressToEthAddress func(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) `perm:"read"`
@ -722,10 +724,14 @@ type GatewayMethods struct {
EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) ``
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
@ -768,12 +774,16 @@ type GatewayMethods struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) ``
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) ``
StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) ``
StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@ -1079,6 +1089,8 @@ type StorageMinerMethods struct {
SectorTerminatePending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
SectorUnseal func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorsList func(p0 context.Context) ([]abi.SectorNumber, error) `perm:"read"`
SectorsListInStates func(p0 context.Context, p1 []SectorState) ([]abi.SectorNumber, error) `perm:"read"`
@ -2414,6 +2426,17 @@ func (s *FullNodeStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (e
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
}
func (s *FullNodeStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
if s.Internal.EthSyncing == nil {
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
return s.Internal.EthSyncing(p0)
}
func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@ -4592,6 +4615,17 @@ func (s *GatewayStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (et
return *new(ethtypes.EthSubscriptionID), ErrNotSupported
}
func (s *GatewayStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
if s.Internal.EthSyncing == nil {
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
return s.Internal.EthSyncing(p0)
}
func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) {
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@ -4614,6 +4648,17 @@ func (s *GatewayStub) EthUnsubscribe(p0 context.Context, p1 ethtypes.EthSubscrip
return false, ErrNotSupported
}
func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
if s.Internal.GasEstimateGasPremium == nil {
return *new(types.BigInt), ErrNotSupported
}
return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
}
func (s *GatewayStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
return *new(types.BigInt), ErrNotSupported
}
func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
if s.Internal.GasEstimateMessageGas == nil {
return nil, ErrNotSupported
@ -4845,6 +4890,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
if s.Internal.StateMinerSectorCount == nil {
return *new(MinerSectors), ErrNotSupported
}
return s.Internal.StateMinerSectorCount(p0, p1, p2)
}
func (s *GatewayStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerSectors, error) {
return *new(MinerSectors), ErrNotSupported
}
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
if s.Internal.StateNetworkName == nil {
return *new(dtypes.NetworkName), ErrNotSupported
@ -4878,6 +4934,17 @@ func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
if s.Internal.StateReplay == nil {
return nil, ErrNotSupported
}
return s.Internal.StateReplay(p0, p1, p2)
}
func (s *GatewayStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*InvocResult, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
if s.Internal.StateSearchMsg == nil {
return nil, ErrNotSupported
@ -6385,6 +6452,17 @@ func (s *StorageMinerStub) SectorTerminatePending(p0 context.Context) ([]abi.Sec
return *new([]abi.SectorID), ErrNotSupported
}
func (s *StorageMinerStruct) SectorUnseal(p0 context.Context, p1 abi.SectorNumber) error {
if s.Internal.SectorUnseal == nil {
return ErrNotSupported
}
return s.Internal.SectorUnseal(p0, p1)
}
func (s *StorageMinerStub) SectorUnseal(p0 context.Context, p1 abi.SectorNumber) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) SectorsList(p0 context.Context) ([]abi.SectorNumber, error) {
if s.Internal.SectorsList == nil {
return *new([]abi.SectorNumber), ErrNotSupported

View File

@ -314,31 +314,30 @@ type NetworkParams struct {
}
type ForkUpgradeParams struct {
UpgradeSmokeHeight abi.ChainEpoch
UpgradeBreezeHeight abi.ChainEpoch
UpgradeIgnitionHeight abi.ChainEpoch
UpgradeLiftoffHeight abi.ChainEpoch
UpgradeAssemblyHeight abi.ChainEpoch
UpgradeRefuelHeight abi.ChainEpoch
UpgradeTapeHeight abi.ChainEpoch
UpgradeKumquatHeight abi.ChainEpoch
UpgradePriceListOopsHeight abi.ChainEpoch
BreezeGasTampingDuration abi.ChainEpoch
UpgradeCalicoHeight abi.ChainEpoch
UpgradePersianHeight abi.ChainEpoch
UpgradeOrangeHeight abi.ChainEpoch
UpgradeClausHeight abi.ChainEpoch
UpgradeTrustHeight abi.ChainEpoch
UpgradeNorwegianHeight abi.ChainEpoch
UpgradeTurboHeight abi.ChainEpoch
UpgradeHyperdriveHeight abi.ChainEpoch
UpgradeChocolateHeight abi.ChainEpoch
UpgradeOhSnapHeight abi.ChainEpoch
UpgradeSkyrHeight abi.ChainEpoch
UpgradeSharkHeight abi.ChainEpoch
UpgradeHyggeHeight abi.ChainEpoch
UpgradeLightningHeight abi.ChainEpoch
UpgradeThunderHeight abi.ChainEpoch
UpgradeSmokeHeight abi.ChainEpoch
UpgradeBreezeHeight abi.ChainEpoch
UpgradeIgnitionHeight abi.ChainEpoch
UpgradeLiftoffHeight abi.ChainEpoch
UpgradeAssemblyHeight abi.ChainEpoch
UpgradeRefuelHeight abi.ChainEpoch
UpgradeTapeHeight abi.ChainEpoch
UpgradeKumquatHeight abi.ChainEpoch
BreezeGasTampingDuration abi.ChainEpoch
UpgradeCalicoHeight abi.ChainEpoch
UpgradePersianHeight abi.ChainEpoch
UpgradeOrangeHeight abi.ChainEpoch
UpgradeClausHeight abi.ChainEpoch
UpgradeTrustHeight abi.ChainEpoch
UpgradeNorwegianHeight abi.ChainEpoch
UpgradeTurboHeight abi.ChainEpoch
UpgradeHyperdriveHeight abi.ChainEpoch
UpgradeChocolateHeight abi.ChainEpoch
UpgradeOhSnapHeight abi.ChainEpoch
UpgradeSkyrHeight abi.ChainEpoch
UpgradeSharkHeight abi.ChainEpoch
UpgradeHyggeHeight abi.ChainEpoch
UpgradeLightningHeight abi.ChainEpoch
UpgradeThunderHeight abi.ChainEpoch
}
type NonceMapType map[address.Address]uint64

View File

@ -3,8 +3,8 @@ package v0api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p/core/peer"

View File

@ -3,8 +3,8 @@ package v0api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@ -35,6 +35,9 @@ import (
// * Generate openrpc blobs
type Gateway interface {
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
ChainHead(ctx context.Context) (*types.TipSet, error)

View File

@ -5,8 +5,8 @@ package v0api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"
@ -449,6 +449,8 @@ type GatewayMethods struct {
ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
@ -487,10 +489,14 @@ type GatewayMethods struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) ``
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``
StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) ``
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@ -2674,6 +2680,17 @@ func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, erro
return *new([]byte), ErrNotSupported
}
func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
if s.Internal.GasEstimateGasPremium == nil {
return *new(types.BigInt), ErrNotSupported
}
return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
}
func (s *GatewayStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
return *new(types.BigInt), ErrNotSupported
}
func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
if s.Internal.GasEstimateMessageGas == nil {
return nil, ErrNotSupported
@ -2883,6 +2900,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
if s.Internal.StateMinerSectorCount == nil {
return *new(api.MinerSectors), ErrNotSupported
}
return s.Internal.StateMinerSectorCount(p0, p1, p2)
}
func (s *GatewayStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
return *new(api.MinerSectors), ErrNotSupported
}
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
if s.Internal.StateNetworkName == nil {
return *new(dtypes.NetworkName), ErrNotSupported
@ -2905,6 +2933,17 @@ func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey
return *new(abinetwork.Version), ErrNotSupported
}
func (s *GatewayStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
if s.Internal.StateReplay == nil {
return nil, ErrNotSupported
}
return s.Internal.StateReplay(p0, p1, p2)
}
func (s *GatewayStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
if s.Internal.StateSearchMsg == nil {
return nil, ErrNotSupported

View File

@ -11,8 +11,8 @@ import (
gomock "github.com/golang/mock/gomock"
uuid "github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
metrics "github.com/libp2p/go-libp2p/core/metrics"
network0 "github.com/libp2p/go-libp2p/core/network"
peer "github.com/libp2p/go-libp2p/core/peer"

View File

@ -3,8 +3,8 @@ package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
)

View File

@ -5,9 +5,9 @@ import (
"sync"
"time"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
)

View File

@ -13,9 +13,9 @@ import (
"github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/options"
"github.com/dgraph-io/badger/v2/pb"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logger "github.com/ipfs/go-log/v2"
pool "github.com/libp2p/go-buffer-pool"
"github.com/multiformats/go-base32"
@ -746,6 +746,20 @@ func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error {
}
put := func(db *badger.DB) error {
// Check if we have it before writing it.
switch err := db.View(func(txn *badger.Txn) error {
_, err := txn.Get(k)
return err
}); err {
case badger.ErrKeyNotFound:
case nil:
// Already exists, skip the put.
return nil
default:
return err
}
// Then write it.
err := db.Update(func(txn *badger.Txn) error {
return txn.Set(k, block.RawData())
})
@ -801,12 +815,33 @@ func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
keys = append(keys, k)
}
err := b.db.View(func(txn *badger.Txn) error {
for i, k := range keys {
switch _, err := txn.Get(k); err {
case badger.ErrKeyNotFound:
case nil:
keys[i] = nil
default:
// Something is actually wrong
return err
}
}
return nil
})
if err != nil {
return err
}
put := func(db *badger.DB) error {
batch := db.NewWriteBatch()
defer batch.Cancel()
for i, block := range blocks {
k := keys[i]
if k == nil {
// skipped because we already have it.
continue
}
if err := batch.Set(k, block.RawData()); err != nil {
return err
}

View File

@ -10,8 +10,8 @@ import (
"strings"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"

View File

@ -9,10 +9,10 @@ import (
"strings"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
u "github.com/ipfs/go-ipfs-util"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/blockstore"

View File

@ -4,9 +4,9 @@ import (
"context"
"os"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
)
// buflog is a logger for the buffered blockstore. It is subscoped from the

View File

@ -4,8 +4,8 @@ import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
)
var _ Blockstore = (*discardstore)(nil)

View File

@ -5,9 +5,9 @@ import (
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
)

View File

@ -4,8 +4,8 @@ import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
mh "github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
)

View File

@ -5,9 +5,9 @@ import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
httpapi "github.com/ipfs/go-ipfs-http-client"
blocks "github.com/ipfs/go-libipfs/blocks"
iface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
"github.com/ipfs/interface-go-ipfs-core/path"

View File

@ -3,9 +3,9 @@ package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
)
// NewMemory returns a temporary memory-backed blockstore.

View File

@ -4,8 +4,8 @@ import (
"context"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
mh "github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
)

View File

@ -8,9 +8,9 @@ import (
"sync"
"sync/atomic"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-msgio"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

View File

@ -5,9 +5,9 @@ import (
"context"
"encoding/binary"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-msgio"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"

View File

@ -6,8 +6,8 @@ import (
"io"
"testing"
block "github.com/ipfs/go-block-format"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/libp2p/go-msgio"
"github.com/stretchr/testify/require"
)

View File

@ -12,8 +12,8 @@ import (
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"go.uber.org/multierr"
"golang.org/x/xerrors"
)

View File

@ -8,10 +8,10 @@ import (
"sync/atomic"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/stats"
"go.uber.org/multierr"
@ -164,7 +164,7 @@ type SplitStore struct {
path string
mx sync.Mutex
warmupEpoch abi.ChainEpoch // protected by mx
warmupEpoch atomic.Int64
baseEpoch abi.ChainEpoch // protected by compaction lock
pruneEpoch abi.ChainEpoch // protected by compaction lock
@ -684,9 +684,7 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
}
func (s *SplitStore) isWarm() bool {
s.mx.Lock()
defer s.mx.Unlock()
return s.warmupEpoch > 0
return s.warmupEpoch.Load() > 0
}
// State tracking
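The diff above replaces the mutex-guarded warmupEpoch field with an atomic.Int64. A minimal sketch of the pattern, under the assumption of one writer and many readers:

package example

import "sync/atomic"

// epochTracker shows the lock-free pattern adopted above: readers call Load,
// the writer calls Store, and no mutex is needed for this single field.
type epochTracker struct {
	warmupEpoch atomic.Int64
}

func (e *epochTracker) isWarm() bool { return e.warmupEpoch.Load() > 0 }

func (e *epochTracker) markWarm(epoch int64) { e.warmupEpoch.Store(epoch) }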
@ -757,7 +755,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
switch err {
case nil:
s.warmupEpoch = bytesToEpoch(bs)
s.warmupEpoch.Store(bytesToInt64(bs))
case dstore.ErrNotFound:
warmup = true
@ -791,7 +789,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
return xerrors.Errorf("error loading compaction index: %w", err)
}
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)
log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch.Load())
if warmup {
err = s.warmup(curTs)

View File

@ -145,7 +145,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
func (s *SplitStore) Info() map[string]interface{} {
info := make(map[string]interface{})
info["base epoch"] = s.baseEpoch
info["warmup epoch"] = s.warmupEpoch
info["warmup epoch"] = s.warmupEpoch.Load()
info["compactions"] = s.compactionIndex
info["prunes"] = s.pruneIndex
info["compacting"] = s.compacting == 1

View File

@ -10,9 +10,9 @@ import (
"sync/atomic"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"golang.org/x/sync/errgroup"
@ -455,7 +455,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
// transactionally protect a reference by walking the object and marking.
// concurrent markings are short circuited by checking the markset.
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) {
if err := s.checkYield(); err != nil {
if err := s.checkClosing(); err != nil {
return 0, err
}
@ -1114,13 +1114,17 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if err := walkBlock(c); err != nil {
return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
}
if err := s.checkYield(); err != nil {
return xerrors.Errorf("check yield: %w", err)
}
}
return nil
})
}
if err := g.Wait(); err != nil {
return err
return xerrors.Errorf("walkBlock workers errored: %w", err)
}
}
@ -1153,8 +1157,8 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid
}
// check this before recursing
if err := s.checkYield(); err != nil {
return 0, err
if err := s.checkClosing(); err != nil {
return 0, xerrors.Errorf("check closing: %w", err)
}
var links []cid.Cid
@ -1222,8 +1226,8 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m
}
// check this before recursing
if err := s.checkYield(); err != nil {
return sz, err
if err := s.checkClosing(); err != nil {
return sz, xerrors.Errorf("check closing: %w", err)
}
var links []cid.Cid

View File

@ -4,9 +4,9 @@ import (
"context"
"errors"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
bstore "github.com/filecoin-project/lotus/blockstore"
)

View File

@ -5,8 +5,8 @@ import (
"runtime"
"sync/atomic"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
)

View File

@ -11,11 +11,11 @@ import (
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
@ -429,7 +429,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
}
defer ss.Close() //nolint
ss.warmupEpoch = 1
ss.warmupEpoch.Store(1)
go ss.reifyOrchestrator()
waitForReification := func() {
@ -529,7 +529,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
}
defer ss.Close() //nolint
ss.warmupEpoch = 1
ss.warmupEpoch.Store(1)
go ss.reifyOrchestrator()
waitForReification := func() {

View File

@ -5,9 +5,9 @@ import (
"sync/atomic"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@ -136,9 +136,8 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
if err != nil {
return xerrors.Errorf("error saving warm up epoch: %w", err)
}
s.mx.Lock()
s.warmupEpoch = epoch
s.mx.Unlock()
s.warmupEpoch.Store(int64(epoch))
// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex))

View File

@ -4,8 +4,8 @@ import (
"context"
"sync"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
)
// NewMemorySync returns a thread-safe in-memory blockstore.

View File

@ -6,9 +6,9 @@ import (
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/raulk/clock"
"go.uber.org/multierr"
)

View File

@ -6,8 +6,8 @@ import (
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/raulk/clock"
"github.com/stretchr/testify/require"
)

View File

@ -3,9 +3,9 @@ package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
)
type unionBlockstore []Blockstore

View File

@ -5,7 +5,7 @@ import (
"context"
"testing"
blocks "github.com/ipfs/go-libipfs/blocks"
blocks "github.com/ipfs/go-block-format"
"github.com/stretchr/testify/require"
)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -9,6 +9,7 @@ package build
import (
"math/big"
"time"
"github.com/ipfs/go-cid"
@ -137,3 +138,7 @@ const BootstrapPeerThreshold = 1
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926
// Reducing the delivery delay for equivocation of
// consistent broadcast to just half a second.
var CBDeliveryDelay = 500 * time.Millisecond

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.23.0"
const BuildVersion = "1.23.2-dev"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -110,6 +110,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
TipSetGetter: stmgr.TipSetGetterForTipset(sm.ChainStore(), ts),
Tracing: vmTracing,
ReturnEvents: sm.ChainStore().IsStoringEvents(),
ExecutionLane: vm.ExecutionLanePriority,
}
return sm.VMConstructor()(ctx, vmopt)

View File

@ -382,13 +382,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
return nil
}
func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
func (filec *FilecoinEC) IsEpochInConsensusRange(epoch abi.ChainEpoch) bool {
if filec.genesis == nil {
return true
}
// Don't try to sync anything before finality. Don't propagate such blocks either.
//
// We use _our_ current head, not the expected head, because the network's head can lag on
// catch-up (after a network outage).
if epoch < filec.store.GetHeaviestTipSet().Height()-build.Finality {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
return epoch <= (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
func (filec *FilecoinEC) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {

View File

@ -32,9 +32,10 @@ type Consensus interface {
// the block (signature verifications, VRF checks, message validity, etc.)
ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)
// IsEpochBeyondCurrMax is used to configure the fork rules for longest-chain
// consensus protocols.
IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
// IsEpochInConsensusRange returns true if the epoch is "in range" for consensus. That is:
// - It's not before finality.
// - It's not too far in the future.
IsEpochInConsensusRange(epoch abi.ChainEpoch) bool
// CreateBlock implements all the logic required to propose and assemble a new Filecoin block.
//
@ -65,23 +66,24 @@ func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pub
stats.Record(ctx, metrics.BlockReceived.M(1))
recordFailureFlagPeer := func(what string) {
// bv.Validate will flag the peer in that case
panic(what)
}
blk, what, err := decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailureFlagPeer(what)
return pubsub.ValidationReject, what
}
if !cns.IsEpochInConsensusRange(blk.Header.Height) {
// We ignore these blocks instead of rejecting to avoid breaking the network if
// we're recovering from an outage (e.g., where nobody agrees on where "head" is
// currently).
log.Warnf("received block outside of consensus range (%d)", blk.Header.Height)
return pubsub.ValidationIgnore, "invalid_block_height"
}
// validate the block meta: the Message CID in the header must match the included messages
err = validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject, "invalid_block_meta"
}
@ -91,7 +93,6 @@ func ValidateBlockPubsub(ctx context.Context, cns Consensus, self bool, msg *pub
log.Warn("ignoring block msg: ", err)
return pubsub.ValidationIgnore, reject
}
recordFailureFlagPeer(reject)
return pubsub.ValidationReject, reject
}

View File

@ -47,7 +47,7 @@ type Events struct {
*hcEvents
}
func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
func newEventsWithGCConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) {
cache := newCache(api, gcConfidence)
ob := newObserver(cache, gcConfidence)
@ -63,5 +63,5 @@ func NewEventsWithConfidence(ctx context.Context, api EventAPI, gcConfidence abi
func NewEvents(ctx context.Context, api EventAPI) (*Events, error) {
gcConfidence := 2 * build.ForkLengthThreshold
return NewEventsWithConfidence(ctx, api, gcConfidence)
return newEventsWithGCConfidence(ctx, api, gcConfidence)
}

View File

@ -174,13 +174,16 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
},
})
require.NoError(t, err)
fcs.mu.Lock()
defer fcs.mu.Unlock()
if fcs.tipsets == nil {
fcs.tipsets = map[types.TipSetKey]*types.TipSet{}
}
fcs.tipsets[ts.Key()] = ts
require.NoError(t, err)
return ts
}

View File

@ -125,7 +125,7 @@ func (o *observer) listenHeadChangesOnce(ctx context.Context) error {
for changes := range notifs {
if err := o.applyChanges(ctx, changes); err != nil {
return err
return xerrors.Errorf("failed to apply a change notification: %w", err)
}
}

View File

@ -4,8 +4,8 @@ import (
"context"
"sync"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"

View File

@ -34,6 +34,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -256,7 +257,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
//return nil, xerrors.Errorf("creating drand beacon: %w", err)
//}
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}

View File

@ -3,8 +3,8 @@ package genesis
import (
"encoding/hex"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/multiformats/go-multihash"
)

chain/index/interface.go (new file, 45 lines)
View File

@ -0,0 +1,45 @@
package index
import (
"context"
"errors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
)
var ErrNotFound = errors.New("message not found")
var ErrClosed = errors.New("index closed")
// MsgInfo is the Message metadata the index tracks.
type MsgInfo struct {
// the message this record refers to
Message cid.Cid
// the tipset where this message was included
TipSet cid.Cid
// the epoch where this message was included
Epoch abi.ChainEpoch
}
// MsgIndex is the interface to the message index
type MsgIndex interface {
// GetMsgInfo retrieves the message metadata through the index.
// The lookup is done using the onchain message Cid; that is the signed message Cid
// for SECP messages and unsigned message Cid for BLS messages.
GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error)
// Close closes the index
Close() error
}
type dummyMsgIndex struct{}
func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
return MsgInfo{}, ErrNotFound
}
func (dummyMsgIndex) Close() error {
return nil
}
var DummyMsgIndex MsgIndex = dummyMsgIndex{}
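A minimal lookup sketch against the MsgIndex interface above; the CID argument is whatever on-chain message CID the caller holds:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/index"
)

// whereIncluded resolves a message's inclusion tipset and epoch via the index.
func whereIncluded(ctx context.Context, idx index.MsgIndex, c cid.Cid) error {
	info, err := idx.GetMsgInfo(ctx, c)
	if errors.Is(err, index.ErrNotFound) {
		return fmt.Errorf("message %s not indexed", c)
	}
	if err != nil {
		return err
	}
	fmt.Printf("message %s in tipset %s at epoch %d\n", info.Message, info.TipSet, info.Epoch)
	return nil
}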

chain/index/msgindex.go (new file)

@ -0,0 +1,553 @@
package index
import (
"context"
"database/sql"
"errors"
"io/fs"
"os"
"path"
"sync"
"time"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
_ "github.com/mattn/go-sqlite3"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
var log = logging.Logger("msgindex")
var dbName = "msgindex.db"
var dbDefs = []string{
`CREATE TABLE IF NOT EXISTS messages (
cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE,
tipset_cid VARCHAR(80) NOT NULL,
epoch INTEGER NOT NULL
)`,
`CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)
`,
`CREATE TABLE IF NOT EXISTS _meta (
version UINT64 NOT NULL UNIQUE
)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
}
var dbPragmas = []string{}
const (
// prepared stmts
dbqGetMessageInfo = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?"
dbqInsertMessage = "INSERT INTO messages VALUES (?, ?, ?)"
dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?"
// reconciliation
dbqCountMessages = "SELECT COUNT(*) FROM messages"
dbqMinEpoch = "SELECT MIN(epoch) FROM messages"
dbqCountTipsetMessages = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?"
dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?"
)
// coalescer configuration (TODO: use observer instead)
// these are exposed to make tests snappy
var (
CoalesceMinDelay = time.Second
CoalesceMaxDelay = 15 * time.Second
CoalesceMergeInterval = time.Second
)
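Because these are package-level variables, a test file can shorten them in its init() before constructing an index, so head-change batches flush quickly; the unit tests later in this commit do exactly this:

func init() {
	// speed up coalescing for tests (mirrors the chain/index unit tests below)
	CoalesceMinDelay = 100 * time.Millisecond
	CoalesceMaxDelay = time.Second
	CoalesceMergeInterval = 100 * time.Millisecond
}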
// chain store interface; we could use store.ChainStore directly,
// but this simplifies unit testing.
type ChainStore interface {
SubscribeHeadChanges(f store.ReorgNotifee)
MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error)
GetHeaviestTipSet() *types.TipSet
GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
}
var _ ChainStore = (*store.ChainStore)(nil)
type msgIndex struct {
cs ChainStore
db *sql.DB
selectMsgStmt *sql.Stmt
insertMsgStmt *sql.Stmt
deleteTipSetStmt *sql.Stmt
sema chan struct{}
mx sync.Mutex
pend []headChange
cancel func()
workers sync.WaitGroup
closeLk sync.RWMutex
closed bool
}
var _ MsgIndex = (*msgIndex)(nil)
type headChange struct {
rev []*types.TipSet
app []*types.TipSet
}
func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex, error) {
var (
dbPath string
exists bool
err error
)
err = os.MkdirAll(basePath, 0755)
if err != nil {
return nil, xerrors.Errorf("error creating msgindex base directory: %w", err)
}
dbPath = path.Join(basePath, dbName)
_, err = os.Stat(dbPath)
switch {
case err == nil:
exists = true
case errors.Is(err, fs.ErrNotExist):
case err != nil:
return nil, xerrors.Errorf("error stating msgindex database: %w", err)
}
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
// TODO [nice to have]: automatically delete corrupt databases
// but for now we can just error and let the operator delete.
return nil, xerrors.Errorf("error opening msgindex database: %w", err)
}
if err := prepareDB(db); err != nil {
return nil, xerrors.Errorf("error creating msgindex database: %w", err)
}
// TODO we may consider populating the index when first creating the db
if exists {
if err := reconcileIndex(db, cs); err != nil {
return nil, xerrors.Errorf("error reconciling msgindex database: %w", err)
}
}
ctx, cancel := context.WithCancel(lctx)
msgIndex := &msgIndex{
db: db,
cs: cs,
sema: make(chan struct{}, 1),
cancel: cancel,
}
err = msgIndex.prepareStatements()
if err != nil {
if err := db.Close(); err != nil {
log.Errorf("error closing msgindex database: %s", err)
}
return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err)
}
rnf := store.WrapHeadChangeCoalescer(
msgIndex.onHeadChange,
CoalesceMinDelay,
CoalesceMaxDelay,
CoalesceMergeInterval,
)
cs.SubscribeHeadChanges(rnf)
msgIndex.workers.Add(1)
go msgIndex.background(ctx)
return msgIndex, nil
}
func PopulateAfterSnapshot(lctx context.Context, basePath string, cs ChainStore) error {
err := os.MkdirAll(basePath, 0755)
if err != nil {
return xerrors.Errorf("error creating msgindex base directory: %w", err)
}
dbPath := path.Join(basePath, dbName)
// if a database already exists, we try to delete it and create a new one
if _, err := os.Stat(dbPath); err == nil {
if err = os.Remove(dbPath); err != nil {
return xerrors.Errorf("msgindex already exists at %s and can't be deleted", dbPath)
}
}
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return xerrors.Errorf("error opening msgindex database: %w", err)
}
defer func() {
if err := db.Close(); err != nil {
log.Errorf("error closing msgindex database: %s", err)
}
}()
if err := prepareDB(db); err != nil {
return xerrors.Errorf("error creating msgindex database: %w", err)
}
tx, err := db.Begin()
if err != nil {
return xerrors.Errorf("error when starting transaction: %w", err)
}
rollback := func() {
if err := tx.Rollback(); err != nil {
log.Errorf("error in rollback: %s", err)
}
}
insertStmt, err := tx.Prepare(dbqInsertMessage)
if err != nil {
rollback()
return xerrors.Errorf("error preparing insertStmt: %w", err)
}
curTs := cs.GetHeaviestTipSet()
startHeight := curTs.Height()
for curTs != nil {
tscid, err := curTs.Key().Cid()
if err != nil {
rollback()
return xerrors.Errorf("error computing tipset cid: %w", err)
}
tskey := tscid.String()
epoch := int64(curTs.Height())
msgs, err := cs.MessagesForTipset(lctx, curTs)
if err != nil {
log.Infof("stopping import after %d tipsets", startHeight-curTs.Height())
break
}
for _, msg := range msgs {
key := msg.Cid().String()
if _, err := insertStmt.Exec(key, tskey, epoch); err != nil {
rollback()
return xerrors.Errorf("error inserting message: %w", err)
}
}
curTs, err = cs.GetTipSetFromKey(lctx, curTs.Parents())
if err != nil {
rollback()
return xerrors.Errorf("error walking chain: %w", err)
}
}
err = tx.Commit()
if err != nil {
return xerrors.Errorf("error committing transaction: %w", err)
}
return nil
}
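
A hypothetical call-site sketch (basePath, cs, and the logger are illustrative names, not part of this change): after importing a snapshot, a node would rebuild the index from the current head by calling this function once.

// Any existing msgindex.db under basePath is deleted and rebuilt
// by walking back from the heaviest tipset.
if err := index.PopulateAfterSnapshot(ctx, basePath, cs); err != nil {
	log.Warnf("failed to populate message index after snapshot import: %s", err)
}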
// init utilities
func prepareDB(db *sql.DB) error {
for _, stmt := range dbDefs {
if _, err := db.Exec(stmt); err != nil {
return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
}
}
for _, stmt := range dbPragmas {
if _, err := db.Exec(stmt); err != nil {
return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
}
}
return nil
}
func reconcileIndex(db *sql.DB, cs ChainStore) error {
// Invariant: after reconciliation, every tipset in the index is in the current chain; i.e. either
// the chain head or reachable by walking the chain.
// Algorithm:
// 1. Count messages in index; if none, trivially reconciled.
// TODO we may consider populating the index in that case
// 2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk
// 3. Walk from current tipset until we find a tipset in the index.
// 4. Delete (revert!) all tipsets above the found tipset.
// 5. If the walk ends in the boundary epoch, then delete everything.
//
row := db.QueryRow(dbqCountMessages)
var result int64
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error counting messages: %w", err)
}
if result == 0 {
return nil
}
row = db.QueryRow(dbqMinEpoch)
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error finding boundary epoch: %w", err)
}
boundaryEpoch := abi.ChainEpoch(result)
countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages)
if err != nil {
return xerrors.Errorf("error preparing statement: %w", err)
}
curTs := cs.GetHeaviestTipSet()
for curTs != nil && curTs.Height() >= boundaryEpoch {
tsCid, err := curTs.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
key := tsCid.String()
row = countMsgsStmt.QueryRow(key)
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error counting messages: %w", err)
}
if result > 0 {
// found it!
boundaryEpoch = curTs.Height() + 1
break
}
// walk up
parents := curTs.Parents()
curTs, err = cs.GetTipSetFromKey(context.TODO(), parents)
if err != nil {
return xerrors.Errorf("error walking chain: %w", err)
}
}
// delete everything above the minEpoch
if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil {
return xerrors.Errorf("error deleting stale reorged out message: %w", err)
}
return nil
}
func (x *msgIndex) prepareStatements() error {
stmt, err := x.db.Prepare(dbqGetMessageInfo)
if err != nil {
return xerrors.Errorf("prepare selectMsgStmt: %w", err)
}
x.selectMsgStmt = stmt
stmt, err = x.db.Prepare(dbqInsertMessage)
if err != nil {
return xerrors.Errorf("prepare insertMsgStmt: %w", err)
}
x.insertMsgStmt = stmt
stmt, err = x.db.Prepare(dbqDeleteTipsetMessages)
if err != nil {
return xerrors.Errorf("prepare deleteTipSetStmt: %w", err)
}
x.deleteTipSetStmt = stmt
return nil
}
// head change notifee
func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return nil
}
// do it in the background to avoid blocking head change processing
x.mx.Lock()
x.pend = append(x.pend, headChange{rev: rev, app: app})
pendLen := len(x.pend)
x.mx.Unlock()
// complain loudly if this is building backlog
if pendLen > 10 {
log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen)
}
select {
case x.sema <- struct{}{}:
default:
}
return nil
}
func (x *msgIndex) background(ctx context.Context) {
defer x.workers.Done()
for {
select {
case <-x.sema:
err := x.processHeadChanges(ctx)
if err != nil {
// we can't rely on an inconsistent index, so shut it down.
log.Errorf("error processing head change notifications: %s; shutting down message index", err)
if err2 := x.Close(); err2 != nil {
log.Errorf("error shutting down index: %s", err2)
}
}
case <-ctx.Done():
return
}
}
}
func (x *msgIndex) processHeadChanges(ctx context.Context) error {
x.mx.Lock()
pend := x.pend
x.pend = nil
x.mx.Unlock()
tx, err := x.db.Begin()
if err != nil {
return xerrors.Errorf("error creating transaction: %w", err)
}
for _, hc := range pend {
for _, ts := range hc.rev {
if err := x.doRevert(ctx, tx, ts); err != nil {
if err2 := tx.Rollback(); err2 != nil {
log.Errorf("error rolling back transaction: %s", err2)
}
return xerrors.Errorf("error reverting %s: %w", ts, err)
}
}
for _, ts := range hc.app {
if err := x.doApply(ctx, tx, ts); err != nil {
if err2 := tx.Rollback(); err2 != nil {
log.Errorf("error rolling back transaction: %s", err2)
}
return xerrors.Errorf("error applying %s: %w", ts, err)
}
}
}
return tx.Commit()
}
func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
tskey, err := ts.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
key := tskey.String()
_, err = tx.Stmt(x.deleteTipSetStmt).Exec(key)
return err
}
func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
tscid, err := ts.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
tskey := tscid.String()
epoch := int64(ts.Height())
msgs, err := x.cs.MessagesForTipset(ctx, ts)
if err != nil {
return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err)
}
insertStmt := tx.Stmt(x.insertMsgStmt)
for _, msg := range msgs {
key := msg.Cid().String()
if _, err := insertStmt.Exec(key, tskey, epoch); err != nil {
return xerrors.Errorf("error inserting message: %w", err)
}
}
return nil
}
// interface
func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return MsgInfo{}, ErrClosed
}
var (
tipset string
epoch int64
)
key := m.String()
row := x.selectMsgStmt.QueryRow(key)
err := row.Scan(&tipset, &epoch)
switch {
case err == sql.ErrNoRows:
return MsgInfo{}, ErrNotFound
case err != nil:
return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err)
}
tipsetCid, err := cid.Decode(tipset)
if err != nil {
return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err)
}
return MsgInfo{
Message: m,
TipSet: tipsetCid,
Epoch: abi.ChainEpoch(epoch),
}, nil
}
func (x *msgIndex) Close() error {
x.closeLk.Lock()
defer x.closeLk.Unlock()
if x.closed {
return nil
}
x.closed = true
x.cancel()
x.workers.Wait()
return x.db.Close()
}
// informal apis for itests; not exposed in the main interface
func (x *msgIndex) CountMessages() (int64, error) {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return 0, ErrClosed
}
var result int64
row := x.db.QueryRow(dbqCountMessages)
err := row.Scan(&result)
return result, err
}


@ -0,0 +1,307 @@
package index
import (
"context"
"errors"
"math/rand"
"os"
"testing"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
)
func TestBasicMsgIndex(t *testing.T) {
// the most basic of tests:
// 1. Create an index with mock chain store
// 2. Advance the chain for a few tipsets
// 3. Verify that the index contains all messages with the correct tipset/epoch
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
}
waitForCoalescerAfterLastEvent()
t.Log("verifying index")
verifyIndex(t, cs, msgIndex)
}
func TestReorgMsgIndex(t *testing.T) {
// slightly more nuanced test that includes reorgs
// 1. Create an index with mock chain store
// 2. Advance/Reorg the chain for a few tipsets
// 3. Verify that the index contains all messages with the correct tipset/epoch
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
}
waitForCoalescerAfterLastEvent()
// a simple reorg
t.Log("doing reorg")
reorgme := cs.curTs
reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
require.NoError(t, err)
cs.setHead(reorgmeParent)
reorgmeChild := cs.makeBlk()
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
require.NoError(t, err)
waitForCoalescerAfterLastEvent()
t.Log("verifying index")
verifyIndex(t, cs, msgIndex)
t.Log("verifying that reorged messages are not present")
verifyMissing(t, cs, msgIndex, reorgme)
}
func TestReconcileMsgIndex(t *testing.T) {
// test that exercises the reconciliation code paths
// 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex.
// 2. Close it
// 3. Reorg the mock chain store
// 4. Reopen the index to trigger reconciliation
// 5. Ensure that only the stable messages remain.
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
}
waitForCoalescerAfterLastEvent()
// Close it and reorg
err = msgIndex.Close()
require.NoError(t, err)
cs.notify = nil
// a simple reorg
t.Log("doing reorg")
reorgme := cs.curTs
reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
require.NoError(t, err)
cs.setHead(reorgmeParent)
reorgmeChild := cs.makeBlk()
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
require.NoError(t, err)
// reopen to reconcile
msgIndex, err = NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
t.Log("verifying index")
// need to step one up because the last tipset is not known by the index
cs.setHead(reorgmeParent)
verifyIndex(t, cs, msgIndex)
t.Log("verifying that reorged and unknown messages are not present")
verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild)
}
func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) {
for ts := cs.curTs; ts.Height() > 0; {
t.Logf("verify at height %d", ts.Height())
blks := ts.Blocks()
if len(blks) == 0 {
break
}
tsCid, err := ts.Key().Cid()
require.NoError(t, err)
msgs, err := cs.MessagesForTipset(context.Background(), ts)
require.NoError(t, err)
for _, m := range msgs {
minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
require.NoError(t, err)
require.Equal(t, tsCid, minfo.TipSet)
require.Equal(t, ts.Height(), minfo.Epoch)
}
parents := ts.Parents()
ts, err = cs.GetTipSetFromKey(context.Background(), parents)
require.NoError(t, err)
}
}
func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) {
for _, ts := range missing {
msgs, err := cs.MessagesForTipset(context.Background(), ts)
require.NoError(t, err)
for _, m := range msgs {
_, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
require.Equal(t, ErrNotFound, err)
}
}
}
type mockChainStore struct {
notify store.ReorgNotifee
curTs *types.TipSet
tipsets map[types.TipSetKey]*types.TipSet
msgs map[types.TipSetKey][]types.ChainMsg
nonce uint64
}
var _ ChainStore = (*mockChainStore)(nil)
var systemAddr address.Address
var rng *rand.Rand
func init() {
systemAddr, _ = address.NewIDAddress(0)
rng = rand.New(rand.NewSource(314159))
// adjust those to make tests snappy
CoalesceMinDelay = 100 * time.Millisecond
CoalesceMaxDelay = time.Second
CoalesceMergeInterval = 100 * time.Millisecond
}
func newMockChainStore() *mockChainStore {
return &mockChainStore{
tipsets: make(map[types.TipSetKey]*types.TipSet),
msgs: make(map[types.TipSetKey][]types.ChainMsg),
}
}
func (cs *mockChainStore) genesis() {
genBlock := mock.MkBlock(nil, 0, 0)
genTs := mock.TipSet(genBlock)
cs.msgs[genTs.Key()] = nil
cs.setHead(genTs)
}
func (cs *mockChainStore) setHead(ts *types.TipSet) {
cs.curTs = ts
cs.tipsets[ts.Key()] = ts
}
func (cs *mockChainStore) advance() error {
ts := cs.makeBlk()
return cs.reorg(nil, []*types.TipSet{ts})
}
func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error {
for _, ts := range rev {
parents := ts.Parents()
cs.curTs = cs.tipsets[parents]
}
for _, ts := range app {
cs.tipsets[ts.Key()] = ts
cs.curTs = ts
}
if cs.notify != nil {
return cs.notify(rev, app)
}
return nil
}
func (cs *mockChainStore) makeBlk() *types.TipSet {
height := cs.curTs.Height() + 1
blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height))
blk.Messages = cs.makeGarbageCid()
ts := mock.TipSet(blk)
msg1 := cs.makeMsg()
msg2 := cs.makeMsg()
cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2}
return ts
}
func (cs *mockChainStore) makeMsg() *types.Message {
nonce := cs.nonce
cs.nonce++
return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce}
}
func (cs *mockChainStore) makeGarbageCid() cid.Cid {
garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))})
return garbage.Cid()
}
func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) {
cs.notify = f
}
func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) {
msgs, ok := cs.msgs[ts.Key()]
if !ok {
return nil, errors.New("unknown tipset")
}
return msgs, nil
}
func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet {
return cs.curTs
}
func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
ts, ok := cs.tipsets[tsk]
if !ok {
return nil, errors.New("unknown tipset")
}
return ts, nil
}
func waitForCoalescerAfterLastEvent() {
// It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event.
// When the timer fires, it can wait up to CoalesceMinDelay again for more events.
// Therefore the total wait is 2 * CoalesceMinDelay.
// Then we wait another second for the listener (the index) to actually process events.
time.Sleep(2*CoalesceMinDelay + time.Second)
}


@ -32,14 +32,18 @@ func (mp *MessagePool) CheckMessages(ctx context.Context, protos []*api.MessageP
// CheckPendingMessages performs a set of logical checks for all messages pending from a given actor
func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Address) ([][]api.MessageCheckStatus, error) {
var msgs []*types.Message
mp.lk.Lock()
mset, ok := mp.pending[from]
mp.lk.RLock()
mset, ok, err := mp.getPendingMset(ctx, from)
if err != nil {
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
}
if ok {
msgs = make([]*types.Message, 0, len(mset.msgs))
for _, sm := range mset.msgs {
msgs = append(msgs, &sm.Message)
}
}
mp.lk.Unlock()
mp.lk.RUnlock()
if len(msgs) == 0 {
return nil, nil
@ -58,13 +62,16 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type
msgMap := make(map[address.Address]map[uint64]*types.Message)
count := 0
mp.lk.Lock()
mp.lk.RLock()
for _, m := range replace {
mmap, ok := msgMap[m.From]
if !ok {
mmap = make(map[uint64]*types.Message)
msgMap[m.From] = mmap
mset, ok := mp.pending[m.From]
mset, ok, err := mp.getPendingMset(ctx, m.From)
if err != nil {
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
}
if ok {
count += len(mset.msgs)
for _, sm := range mset.msgs {
@ -76,7 +83,7 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type
}
mmap[m.Nonce] = m
}
mp.lk.Unlock()
mp.lk.RUnlock()
msgs := make([]*types.Message, 0, count)
start := 0
@ -103,9 +110,9 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
if mp.api.IsLite() {
return nil, nil
}
mp.curTsLk.Lock()
mp.curTsLk.RLock()
curTs := mp.curTs
mp.curTsLk.Unlock()
mp.curTsLk.RUnlock()
epoch := curTs.Height() + 1
@ -143,22 +150,25 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
st, ok := state[m.From]
if !ok {
mp.lk.Lock()
mset, ok := mp.pending[m.From]
mp.lk.RLock()
mset, ok, err := mp.getPendingMset(ctx, m.From)
if err != nil {
return nil, xerrors.Errorf("errored while getting pending mset: %w", err)
}
if ok && !interned {
st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds}
for _, m := range mset.msgs {
st.requiredFunds = new(stdbig.Int).Add(st.requiredFunds, m.Message.Value.Int)
}
state[m.From] = st
mp.lk.Unlock()
mp.lk.RUnlock()
check.OK = true
check.Hint = map[string]interface{}{
"nonce": st.nextNonce,
}
} else {
mp.lk.Unlock()
mp.lk.RUnlock()
stateNonce, err := mp.getStateNonce(ctx, m.From, curTs)
if err != nil {


@ -118,7 +118,7 @@ func init() {
}
type MessagePool struct {
lk sync.Mutex
lk sync.RWMutex
ds dtypes.MetadataDS
@ -137,9 +137,9 @@ type MessagePool struct {
// do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively
pending map[address.Address]*msgSet
keyCache map[address.Address]address.Address
keyCache *lru.Cache[address.Address, address.Address]
curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
curTsLk sync.RWMutex // DO NOT LOCK INSIDE lk
curTs *types.TipSet
cfgLk sync.RWMutex
@ -169,13 +169,13 @@ type MessagePool struct {
sigValCache *lru.TwoQueueCache[string, struct{}]
nonceCache *lru.Cache[nonceCacheKey, uint64]
stateNonceCache *lru.Cache[stateNonceCacheKey, uint64]
evtTypes [3]journal.EventType
journal journal.Journal
}
type nonceCacheKey struct {
type stateNonceCacheKey struct {
tsk types.TipSetKey
addr address.Address
}
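The annotation on curTsLk above encodes a lock-ordering rule: curTsLk is always taken before lk and never while lk is held, so no two goroutines can acquire the pair in opposite orders and deadlock. A minimal sketch of the required order (hypothetical method, mirroring GetNonce and Pending below):

func (mp *MessagePool) readSnapshot() {
	mp.curTsLk.RLock() // 1: chain-head lock first
	defer mp.curTsLk.RUnlock()

	mp.lk.RLock() // 2: pending-set lock second; never the reverse order
	defer mp.lk.RUnlock()

	// ... read mp.curTs and mp.pending consistently ...
}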
@ -371,7 +371,8 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize)
noncecache, _ := lru.New[nonceCacheKey, uint64](256)
stateNonceCache, _ := lru.New[stateNonceCacheKey, uint64](32768) // 32k * ~200 bytes = 6MB
keycache, _ := lru.New[address.Address, address.Address](1_000_000)
cfg, err := loadConfig(ctx, ds)
if err != nil {
@ -383,26 +384,26 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
}
mp := &MessagePool{
ds: ds,
addSema: make(chan struct{}, 1),
closer: make(chan struct{}),
repubTk: build.Clock.Ticker(RepublishInterval),
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet),
keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0),
getNtwkVersion: us.GetNtwkVersion,
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache,
sigValCache: verifcache,
nonceCache: noncecache,
changes: lps.New(50),
localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
api: api,
netName: netName,
cfg: cfg,
ds: ds,
addSema: make(chan struct{}, 1),
closer: make(chan struct{}),
repubTk: build.Clock.Ticker(RepublishInterval),
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet),
keyCache: keycache,
minGasPrice: types.NewInt(0),
getNtwkVersion: us.GetNtwkVersion,
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache,
sigValCache: verifcache,
stateNonceCache: stateNonceCache,
changes: lps.New(50),
localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
api: api,
netName: netName,
cfg: cfg,
evtTypes: [...]journal.EventType{
evtTypeMpoolAdd: j.RegisterEventType("mpool", "add"),
evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"),
@ -447,12 +448,8 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
return mp, nil
}
func (mp *MessagePool) TryForEachPendingMessage(f func(cid.Cid) error) error {
// avoid deadlocks in splitstore compaction when something else needs to access the blockstore
// while holding the mpool lock
if !mp.lk.TryLock() {
return xerrors.Errorf("mpool TryForEachPendingMessage: could not acquire lock")
}
func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
mp.lk.Lock()
defer mp.lk.Unlock()
for _, mset := range mp.pending {
@ -473,9 +470,18 @@ func (mp *MessagePool) TryForEachPendingMessage(f func(cid.Cid) error) error {
}
func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
//if addr is not an ID addr, then it is already resolved to a key
if addr.Protocol() != address.ID {
return addr, nil
}
return mp.resolveToKeyFromID(ctx, addr)
}
func (mp *MessagePool) resolveToKeyFromID(ctx context.Context, addr address.Address) (address.Address, error) {
// check the cache
a, f := mp.keyCache[addr]
if f {
a, ok := mp.keyCache.Get(addr)
if ok {
return a, nil
}
@ -486,9 +492,7 @@ func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (
}
// place both entries in the cache (may both be key addresses, which is fine)
mp.keyCache[addr] = ka
mp.keyCache[ka] = ka
mp.keyCache.Add(addr, ka)
return ka, nil
}
@ -741,8 +745,7 @@ func (mp *MessagePool) checkMessage(ctx context.Context, m *types.SignedMessage)
}
if err := mp.VerifyMsgSig(m); err != nil {
log.Warnf("signature verification failed: %s", err)
return err
return xerrors.Errorf("signature verification failed: %s", err)
}
return nil
@ -763,7 +766,28 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
<-mp.addSema
}()
mp.curTsLk.RLock()
tmpCurTs := mp.curTs
mp.curTsLk.RUnlock()
// ensure computations are cached without holding the lock
_, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
mp.curTsLk.Lock()
if tmpCurTs == mp.curTs {
// with the lock held, mp.curTs is the same tipset we just used, so we know our computations are cached
} else {
// curTs has been updated, so we want to cache the new one:
tmpCurTs = mp.curTs
// release the lock, cache the computations, then grab the lock again
mp.curTsLk.Unlock()
_, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
mp.curTsLk.Lock()
// now that we have the lock we continue; we could retry in a loop, but this was added as an optimization,
// and once is enough in practice because the computation takes less than a block time
}
defer mp.curTsLk.Unlock()
_, err = mp.addTs(ctx, m, mp.curTs, false, false)
@ -852,9 +876,6 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs
return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
}
mp.lk.Lock()
defer mp.lk.Unlock()
senderAct, err := mp.api.GetActorAfter(m.Message.From, curTs)
if err != nil {
return false, xerrors.Errorf("failed to get sender actor: %w", err)
@ -869,6 +890,9 @@ func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs
return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From)
}
mp.lk.Lock()
defer mp.lk.Unlock()
publish, err := mp.verifyMsgBeforeAdd(ctx, m, curTs, local)
if err != nil {
return false, xerrors.Errorf("verify msg failed: %w", err)
@ -940,13 +964,11 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
}
if _, err := mp.api.PutMessage(ctx, m); err != nil {
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
return err
return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
}
if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil {
log.Warnf("mpooladd cs.PutMessage failed: %s", err)
return err
return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
}
// Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
@ -1001,19 +1023,19 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
}
func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
mp.lk.Lock()
defer mp.lk.Unlock()
mp.lk.RLock()
defer mp.lk.RUnlock()
return mp.getNonceLocked(ctx, addr, mp.curTs)
}
// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling
func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
return mp.api.GetActorAfter(addr, mp.curTs)
}
@ -1046,24 +1068,52 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
defer done()
nk := nonceCacheKey{
nk := stateNonceCacheKey{
tsk: ts.Key(),
addr: addr,
}
n, ok := mp.nonceCache.Get(nk)
n, ok := mp.stateNonceCache.Get(nk)
if ok {
return n, nil
}
act, err := mp.api.GetActorAfter(addr, ts)
// get the nonce from the actor before ts
actor, err := mp.api.GetActorBefore(addr, ts)
if err != nil {
return 0, err
}
nextNonce := actor.Nonce
raddr, err := mp.resolveToKey(ctx, addr)
if err != nil {
return 0, err
}
mp.nonceCache.Add(nk, act.Nonce)
// loop over all messages sent by 'addr' and find the highest nonce
messages, err := mp.api.MessagesForTipset(ctx, ts)
if err != nil {
return 0, err
}
for _, message := range messages {
msg := message.VMMessage()
return act.Nonce, nil
maddr, err := mp.resolveToKey(ctx, msg.From)
if err != nil {
log.Warnf("failed to resolve message from address: %s", err)
continue
}
if maddr == raddr {
if n := msg.Nonce + 1; n > nextNonce {
nextNonce = n
}
}
}
mp.stateNonceCache.Add(nk, nextNonce)
return nextNonce, nil
}
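A worked example of the rule implemented above, with hypothetical values: if the actor's nonce in the state before ts is 5 (from GetActorBefore) and ts itself already contains messages from the same sender with nonces 5 and 6, the next usable nonce is 7.

package main

import "fmt"

func main() {
	nextNonce := uint64(5)             // actor.Nonce from GetActorBefore
	for _, n := range []uint64{5, 6} { // the sender's message nonces within ts
		if n+1 > nextNonce {
			nextNonce = n + 1
		}
	}
	fmt.Println(nextNonce) // prints 7
}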
func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
@ -1164,11 +1214,11 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u
}
func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
mp.lk.Lock()
defer mp.lk.Unlock()
mp.lk.RLock()
defer mp.lk.RUnlock()
return mp.allPending(ctx)
}
@ -1184,11 +1234,11 @@ func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage,
}
func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
mp.lk.Lock()
defer mp.lk.Unlock()
mp.lk.RLock()
defer mp.lk.RUnlock()
return mp.pendingFor(ctx, a), mp.curTs
}
@ -1237,9 +1287,9 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a
maybeRepub := func(cid cid.Cid) {
if !repubTrigger {
mp.lk.Lock()
mp.lk.RLock()
_, republished := mp.republished[cid]
mp.lk.Unlock()
mp.lk.RUnlock()
if republished {
repubTrigger = true
}
@ -1310,9 +1360,9 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a
}
if len(revert) > 0 && futureDebug {
mp.lk.Lock()
mp.lk.RLock()
msgs, ts := mp.allPending(ctx)
mp.lk.Unlock()
mp.lk.RUnlock()
buckets := map[address.Address]*statBucket{}


@ -120,6 +120,22 @@ func (tma *testMpoolAPI) PubSubPublish(string, []byte) error {
return nil
}
func (tma *testMpoolAPI) GetActorBefore(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
balance, ok := tma.balance[addr]
if !ok {
balance = types.NewInt(1000e6)
tma.balance[addr] = balance
}
nonce := tma.statenonce[addr]
return &types.Actor{
Code: builtin2.AccountActorCodeID,
Nonce: nonce,
Balance: balance,
}, nil
}
func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
// regression check for load bug
if ts == nil {


@ -2,6 +2,7 @@ package messagepool
import (
"context"
"errors"
"time"
"github.com/ipfs/go-cid"
@ -27,6 +28,7 @@ type Provider interface {
SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet
PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error)
PubSubPublish(string, []byte) error
GetActorBefore(address.Address, *types.TipSet) (*types.Actor, error)
GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
StateDeterministicAddressAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error)
StateNetworkVersion(context.Context, abi.ChainEpoch) network.Version
@ -58,6 +60,23 @@ func (mpp *mpoolProvider) IsLite() bool {
return mpp.lite != nil
}
func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
if !mpp.IsLite() {
return nil, errors.New("should not use getActorLite on non lite Provider")
}
n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting nonce over lite: %w", err)
}
a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting actor over lite: %w", err)
}
a.Nonce = n
return a, nil
}
func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet {
mpp.sm.ChainStore().SubscribeHeadChanges(
store.WrapHeadChangeCoalescer(
@ -77,18 +96,17 @@ func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error {
return mpp.ps.Publish(k, v) // nolint
}
func (mpp *mpoolProvider) GetActorBefore(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
if mpp.IsLite() {
return mpp.getActorLite(addr, ts)
}
return mpp.sm.LoadActor(context.TODO(), addr, ts)
}
func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
if mpp.IsLite() {
n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting nonce over lite: %w", err)
}
a, err := mpp.lite.GetActor(context.TODO(), addr, ts.Key())
if err != nil {
return nil, xerrors.Errorf("getting actor over lite: %w", err)
}
a.Nonce = n
return a, nil
return mpp.getActorLite(addr, ts)
}
stcid, _, err := mpp.sm.TipSetState(context.TODO(), ts)


@ -20,19 +20,23 @@ const repubMsgLimit = 30
var RepublishBatchDelay = 100 * time.Millisecond
func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
mp.curTsLk.Lock()
mp.curTsLk.RLock()
ts := mp.curTs
mp.curTsLk.RUnlock()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
if err != nil {
mp.curTsLk.Unlock()
return xerrors.Errorf("computing basefee: %w", err)
}
baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock()
mp.republished = nil // clear this to avoid races triggering an early republish
mp.lk.Unlock()
mp.lk.RLock()
mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
mset, ok, err := mp.getPendingMset(ctx, actor)
if err != nil {
@ -53,9 +57,7 @@ func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
}
pending[actor] = pend
})
mp.lk.Unlock()
mp.curTsLk.Unlock()
mp.lk.RUnlock()
if len(pending) == 0 {
return nil
@ -176,8 +178,8 @@ loop:
republished[m.Cid()] = struct{}{}
}
mp.lk.Lock()
// update the republished set so that we can trigger early republish from head changes
mp.lk.Lock()
mp.republished = republished
mp.lk.Unlock()


@ -40,11 +40,21 @@ type msgChain struct {
}
func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
mp.lk.Lock()
defer mp.lk.Unlock()
mp.lk.RLock()
defer mp.lk.RUnlock()
// See if we need to prune before selection; excessive buildup can lead to slow selection,
// so prune if we have too many messages (ignoring the cooldown).
mpCfg := mp.getConfig()
if mp.currentSize > mpCfg.SizeLimitHigh {
log.Infof("too many messages; pruning before selection")
if err := mp.pruneMessages(ctx, ts); err != nil {
log.Warnf("error pruning excess messages: %s", err)
}
}
// if the ticket quality is high enough that the first block has higher probability
// than any other block, then we don't bother with optimal selection because the


@ -128,10 +128,47 @@ func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types
}
func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) {
tsKey := ts.Key()
// check if we have the trace for this tipset in the cache
if execTraceCacheSize > 0 {
sm.execTraceCacheLock.Lock()
if entry, ok := sm.execTraceCache.Get(tsKey); ok {
// we have to make a deep copy, since the caller can modify the invocTrace
// and we don't want that to change what we store in the cache
invocTraceCopy := makeDeepCopy(entry.invocTrace)
sm.execTraceCacheLock.Unlock()
return entry.postStateRoot, invocTraceCopy, nil
}
sm.execTraceCacheLock.Unlock()
}
var invocTrace []*api.InvocResult
st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace})
if err != nil {
return cid.Undef, nil, err
}
if execTraceCacheSize > 0 {
invocTraceCopy := makeDeepCopy(invocTrace)
sm.execTraceCacheLock.Lock()
sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy})
sm.execTraceCacheLock.Unlock()
}
return st, invocTrace, nil
}
func makeDeepCopy(invocTrace []*api.InvocResult) []*api.InvocResult {
c := make([]*api.InvocResult, len(invocTrace))
for i, ir := range invocTrace {
if ir == nil {
continue
}
tmp := *ir
c[i] = &tmp
}
return c
}
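
A minimal sketch of the copy-on-read/copy-on-write discipline used above (hypothetical helper; assumes the StateManager fields introduced later in this commit): values are copied on both sides of the cache boundary, so the caller and the cache never share a mutable trace.

func (sm *StateManager) cachedTrace(key types.TipSetKey) ([]*api.InvocResult, bool) {
	sm.execTraceCacheLock.Lock()
	defer sm.execTraceCacheLock.Unlock()
	if entry, ok := sm.execTraceCache.Get(key); ok {
		// copy on the way out, so callers cannot mutate the cached trace
		return makeDeepCopy(entry.invocTrace), true
	}
	return nil, false
}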


@ -36,6 +36,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
. "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
@ -168,7 +169,7 @@ func TestForkHeightTriggers(t *testing.T) {
}
return st.Flush(ctx)
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -286,7 +287,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
migrationCount++
return root, nil
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -504,7 +505,7 @@ func TestForkPreMigration(t *testing.T) {
return nil
},
}}},
}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -579,6 +580,7 @@ func TestDisablePreMigration(t *testing.T) {
},
cg.BeaconSchedule(),
datastore.NewMapDatastore(),
index.DummyMsgIndex,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
@ -633,6 +635,7 @@ func TestMigrtionCache(t *testing.T) {
},
cg.BeaconSchedule(),
metadataDs,
index.DummyMsgIndex,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
@ -685,6 +688,7 @@ func TestMigrtionCache(t *testing.T) {
},
cg.BeaconSchedule(),
metadataDs,
index.DummyMsgIndex,
)
require.NoError(t, err)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {


@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
@ -18,6 +19,7 @@ import (
// happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on
// chain for at least confidence epochs without being reverted before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
// TODO use the index to speed this up.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -55,10 +57,15 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid
var backFm cid.Cid
backSearchWait := make(chan struct{})
go func() {
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message: %v", err)
return
fts, r, foundMsg, err := sm.searchForIndexedMsg(ctx, mcid, msg)
found := (err == nil && r != nil && foundMsg.Defined())
if !found {
fts, r, foundMsg, err = sm.searchBackForMsg(ctx, head[0].Val, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message: %v", err)
return
}
}
backTs = fts
@ -145,7 +152,30 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
return head, r, foundMsg, nil
}
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
fts, r, foundMsg, err := sm.searchForIndexedMsg(ctx, mcid, msg)
switch {
case err == nil:
if r != nil && foundMsg.Defined() {
return fts, r, foundMsg, nil
}
// debug log this, it's noteworthy
if r == nil {
log.Debugf("missing receipt in index for message %s", mcid)
}
if !foundMsg.Defined() {
log.Debugf("message %s not found", mcid)
}
case errors.Is(err, index.ErrNotFound):
// ok for the index to have incomplete data
default:
log.Warnf("error searching message index: %s", err)
}
fts, r, foundMsg, err = sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message %s", mcid)
@ -159,6 +189,44 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
return fts, r, foundMsg, nil
}
func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err)
}
// check the height against the current tipset; minimum execution confidence requires that the
// inclusion tipset height is lower than the current head + 1
curTs := sm.cs.GetHeaviestTipSet()
if curTs.Height() <= minfo.Epoch+1 {
return nil, nil, cid.Undef, xerrors.Errorf("indexed message does not appear before the current tipset; index epoch: %d, current epoch: %d", minfo.Epoch, curTs.Height())
}
// now get the execution tipset
// TODO optimization: the index should have it implicitly so we can return it in the msginfo.
xts, err := sm.cs.GetTipsetByHeight(ctx, minfo.Epoch+1, curTs, false)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error looking up execution tipset: %w", err)
}
// check that the parent of the execution index is indeed the inclusion tipset
parent := xts.Parents()
parentCid, err := parent.Cid()
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error computing tipset cid: %w", err)
}
if !parentCid.Equals(minfo.TipSet) {
return nil, nil, cid.Undef, xerrors.Errorf("inclusion tipset mismatch: have %s, expected %s", parentCid, minfo.TipSet)
}
r, foundMsg, err := sm.tipsetExecutedMessage(ctx, xts, mcid, m.VMMessage(), false)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error in tipsetExecutedMessage: %w", err)
}
return xts, r, foundMsg, nil
}
// searchBackForMsg searches up to limit tipsets backwards from the given
// tipset for a message receipt.
// If limit is


@ -3,8 +3,11 @@ package stmgr
import (
"context"
"fmt"
"os"
"strconv"
"sync"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
@ -25,6 +28,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
@ -38,6 +42,7 @@ import (
const LookbackNoLimit = api.LookbackNoLimit
const ReceiptAmtBitwidth = 3
var execTraceCacheSize = 16
var log = logging.Logger("statemgr")
type StateManagerAPI interface {
@ -70,6 +75,17 @@ func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key {
return dstore.NewKey(kStr)
}
func init() {
if s := os.Getenv("LOTUS_EXEC_TRACE_CACHE_SIZE"); s != "" {
letc, err := strconv.Atoi(s)
if err != nil {
log.Errorf("failed to parse 'LOTUS_EXEC_TRACE_CACHE_SIZE' env var: %s", err)
} else {
execTraceCacheSize = letc
}
}
}
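Note that setting LOTUS_EXEC_TRACE_CACHE_SIZE=0 disables the execution-trace cache entirely: every cache path in ExecutionTrace is guarded by execTraceCacheSize > 0, and the ARC cache is only constructed when the configured size is positive.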
func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
k := m.keyForMigration(root)
@ -135,6 +151,15 @@ type StateManager struct {
tsExec Executor
tsExecMonitor ExecMonitor
beacon beacon.Schedule
msgIndex index.MsgIndex
// We keep a small cache for calls to ExecutionTrace which helps improve
// performance for node operators like exchanges and block explorers
execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
// We need a lock while making the copy, to prevent other callers from
// overwriting the cache while the copy is being made
execTraceCacheLock sync.Mutex
}
// Caches a single state tree
@ -143,7 +168,12 @@ type treeCache struct {
tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
type tipSetCacheEntry struct {
postStateRoot cid.Cid
invocTrace []*api.InvocResult
}
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
// If we have upgrades, make sure they're in-order and make sense.
if err := us.Validate(); err != nil {
return nil, err
@ -182,6 +212,16 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
}
}
log.Debugf("execTraceCache size: %d", execTraceCacheSize)
var execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
var err error
if execTraceCacheSize > 0 {
execTraceCache, err = lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
if err != nil {
return nil, err
}
}
return &StateManager{
networkVersions: networkVersions,
latestVersion: lastVersion,
@ -197,12 +237,14 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
root: cid.Undef,
tree: nil,
},
compWait: make(map[string]chan struct{}),
compWait: make(map[string]chan struct{}),
msgIndex: msgIndex,
execTraceCache: execTraceCache,
}, nil
}
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex)
if err != nil {
return nil, err
}

View File

@ -3,10 +3,10 @@ package store
import (
"context"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
block "github.com/ipfs/go-libipfs/blocks"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
@ -114,12 +114,35 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
return nil, xerrors.Errorf("failed to load state tree at tipset %s: %w", ts, err)
}
useIds := false
selectMsg := func(m *types.Message) (bool, error) {
var sender address.Address
if ts.Height() >= build.UpgradeHyperdriveHeight {
sender, err = st.LookupID(m.From)
if err != nil {
return false, err
if useIds {
sender, err = st.LookupID(m.From)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
} else {
if m.From.Protocol() != address.ID {
// we haven't been told to use IDs, just use the robust addr
sender = m.From
} else {
// uh-oh, we actually have an ID-sender!
useIds = true
for robust, nonce := range applied {
resolved, err := st.LookupID(robust)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
applied[resolved] = nonce
}
sender, err = st.LookupID(m.From)
if err != nil {
return false, xerrors.Errorf("failed to resolve sender: %w", err)
}
}
}
} else {
sender = m.From


@ -3,13 +3,15 @@ package store
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
format "github.com/ipfs/go-ipld-format"
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
carv2 "github.com/ipld/go-car/v2"
@ -121,11 +123,9 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
}
ts := root
tssToPersist := make([]*types.TipSet, 0, TipsetkeyBackfillRange)
for i := 0; i < int(TipsetkeyBackfillRange); i++ {
err = cs.PersistTipset(ctx, ts)
if err != nil {
return nil, err
}
tssToPersist = append(tssToPersist, ts)
parentTsKey := ts.Parents()
ts, err = cs.LoadTipSet(ctx, parentTsKey)
if ts == nil || err != nil {
@ -134,6 +134,10 @@ func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, e
}
}
if err := cs.PersistTipsets(ctx, tssToPersist); err != nil {
return nil, xerrors.Errorf("failed to persist tipsets: %w", err)
}
return root, nil
}
@ -167,8 +171,11 @@ func (t walkSchedTaskType) String() string {
}
type walkTask struct {
c cid.Cid
taskType walkSchedTaskType
c cid.Cid
taskType walkSchedTaskType
topLevelTaskType walkSchedTaskType
blockCid cid.Cid
epoch abi.ChainEpoch
}
// an ever growing FIFO
@ -317,8 +324,11 @@ func newWalkScheduler(ctx context.Context, store bstore.Blockstore, cfg walkSche
cancel() // kill workers
return nil, ctx.Err()
case s.workerTasks.in <- walkTask{
c: b.Cid(),
taskType: blockTask,
c: b.Cid(),
taskType: blockTask,
topLevelTaskType: blockTask,
blockCid: b.Cid(),
epoch: cfg.head.Height(),
}:
}
}
@ -363,6 +373,9 @@ func (s *walkScheduler) enqueueIfNew(task walkTask) {
//log.Infow("ignored", "cid", todo.c.String())
return
}
// This lets through RAW and CBOR blocks, the only two types that we
// end up writing to the exported CAR.
if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
//log.Infow("ignored", "cid", todo.c.String())
return
@ -416,8 +429,17 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
}
blk, err := s.store.Get(s.ctx, t.c)
if errors.Is(err, format.ErrNotFound{}) && t.topLevelTaskType == receiptTask {
log.Debugw("ignoring not-found block in Receipts",
"block", t.blockCid,
"epoch", t.epoch,
"cid", t.c)
return nil
}
if err != nil {
return xerrors.Errorf("writing object to car, bs.Get: %w", err)
return xerrors.Errorf(
"blockstore.Get(%s). Task: %s. Block: %s (%s). Epoch: %d. Err: %w",
t.c, t.taskType, t.topLevelTaskType, t.blockCid, t.epoch, err)
}
s.results <- taskResult{
@ -425,15 +447,19 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
b: blk,
}
// We exported the ipld block. If it wasn't a CBOR block, there's nothing
// else to do and we can bail out early as it won't have any links
// etc.
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
return nil
}
rawData := blk.RawData()
// extract relevant dags to walk from the block
if t.taskType == blockTask {
blk := t.c
data, err := s.store.Get(s.ctx, blk)
if err != nil {
return err
}
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
if err := b.UnmarshalCBOR(bytes.NewBuffer(rawData)); err != nil {
return xerrors.Errorf("unmarshalling block header (cid=%s): %w", blk, err)
}
if b.Height%1_000 == 0 {
@ -443,13 +469,19 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
log.Info("exporting genesis block")
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: dagTask,
c: b.Parents[i],
taskType: dagTask,
topLevelTaskType: blockTask,
blockCid: b.Parents[i],
epoch: 0,
})
}
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
c: b.ParentStateRoot,
taskType: stateTask,
topLevelTaskType: stateTask,
blockCid: t.c,
epoch: 0,
})
return s.sendFinish(workerN)
@ -457,33 +489,45 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
// enqueue block parents
for i := range b.Parents {
s.enqueueIfNew(walkTask{
c: b.Parents[i],
taskType: blockTask,
c: b.Parents[i],
taskType: blockTask,
topLevelTaskType: blockTask,
blockCid: b.Parents[i],
epoch: b.Height,
})
}
if s.cfg.tail.Height() >= b.Height {
log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", blk.String())
log.Debugw("tail reached: only blocks will be exported from now until genesis", "cid", t.c.String())
return nil
}
if s.cfg.includeMessages {
// enqueue block messages
s.enqueueIfNew(walkTask{
c: b.Messages,
taskType: messageTask,
c: b.Messages,
taskType: messageTask,
topLevelTaskType: messageTask,
blockCid: t.c,
epoch: b.Height,
})
}
if s.cfg.includeReceipts {
// enqueue block receipts
s.enqueueIfNew(walkTask{
c: b.ParentMessageReceipts,
taskType: receiptTask,
c: b.ParentMessageReceipts,
taskType: receiptTask,
topLevelTaskType: receiptTask,
blockCid: t.c,
epoch: b.Height,
})
}
if s.cfg.includeState {
s.enqueueIfNew(walkTask{
c: b.ParentStateRoot,
taskType: stateTask,
c: b.ParentStateRoot,
taskType: stateTask,
topLevelTaskType: stateTask,
blockCid: t.c,
epoch: b.Height,
})
}
@ -491,16 +535,22 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
}
// Not a chain-block: we scan for CIDs in the raw block-data
return cbg.ScanForLinks(bytes.NewReader(blk.RawData()), func(c cid.Cid) {
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
return
}
err = cbg.ScanForLinks(bytes.NewReader(rawData), func(c cid.Cid) {
s.enqueueIfNew(walkTask{
c: c,
taskType: dagTask,
c: c,
taskType: dagTask,
topLevelTaskType: t.topLevelTaskType,
blockCid: t.blockCid,
epoch: t.epoch,
})
})
if err != nil {
return xerrors.Errorf(
"ScanForLinks(%s). Task: %s. Block: %s (%s). Epoch: %d. Err: %w",
t.c, t.taskType, t.topLevelTaskType, t.blockCid, t.epoch, err)
}
return nil
}
func (cs *ChainStore) ExportRange(


@ -12,11 +12,11 @@ import (
"time"
lru "github.com/hashicorp/golang-lru/v2"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
block "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
@ -378,7 +378,7 @@ func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) erro
}
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
if err := cs.PersistTipset(ctx, ts); err != nil {
if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
return xerrors.Errorf("failed to persist tipset: %w", err)
}
@ -425,6 +425,11 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
}
defer cs.heaviestLk.Unlock()
if ts.Equals(cs.heaviest) {
return nil
}
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
if err != nil {
return err
@ -639,22 +644,10 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error {
_, span := trace.StartSpan(ctx, "takeHeaviestTipSet")
defer span.End()
if cs.heaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: cs.heaviest,
new: ts,
}
} else {
log.Warnf("no heaviest tipset found, using %s", ts.Cids())
}
span.AddAttributes(trace.BoolAttribute("newHead", true))
log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height())
prevHeaviest := cs.heaviest
cs.heaviest = ts
if err := cs.writeHead(ctx, ts); err != nil {
@ -662,6 +655,18 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
return err
}
if prevHeaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
}
cs.reorgCh <- reorg{
old: prevHeaviest,
new: ts,
}
} else {
log.Warnf("no previous heaviest tipset found, using %s", ts.Cids())
}
return nil
}
@ -970,18 +975,25 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
return nil
}
func (cs *ChainStore) PersistTipset(ctx context.Context, ts *types.TipSet) error {
if err := cs.persistBlockHeaders(ctx, ts.Blocks()...); err != nil {
func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
tsBlks := make([]block.Block, 0, len(tipsets))
for _, ts := range tipsets {
toPersist = append(toPersist, ts.Blocks()...)
tsBlk, err := ts.Key().ToStorageBlock()
if err != nil {
return xerrors.Errorf("failed to get tipset key block: %w", err)
}
tsBlks = append(tsBlks, tsBlk)
}
if err := cs.persistBlockHeaders(ctx, toPersist...); err != nil {
return xerrors.Errorf("failed to persist block headers: %w", err)
}
tsBlk, err := ts.Key().ToStorageBlock()
if err != nil {
return xerrors.Errorf("failed to get tipset key block: %w", err)
}
if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
return xerrors.Errorf("failed to put tipset key block: %w", err)
if err := cs.chainLocalBlockstore.PutMany(ctx, tsBlks); err != nil {
return xerrors.Errorf("failed to put tipset key blocks: %w", err)
}
return nil
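
A hedged usage sketch of the batched API: one call now persists all headers through a single persistBlockHeaders and writes every tipset key block with one PutMany, instead of a Put per tipset. Variable names here are illustrative:

// Illustrative only: persist a synced range of tipsets in one batch.
var headers []*types.TipSet // e.g. tipsets collected during sync, oldest first
if err := cs.PersistTipsets(ctx, headers); err != nil {
	return xerrors.Errorf("failed to persist %d tipsets: %w", len(headers), err)
}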

View File

@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -214,7 +215,7 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain")
}
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}

View File

@ -8,9 +8,9 @@ import (
"time"
lru "github.com/hashicorp/golang-lru/v2"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"

View File

@ -7,8 +7,8 @@ import (
"testing"
"github.com/golang/mock/gomock"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"

View File

@ -11,10 +11,10 @@ import (
"github.com/Gurpartap/async"
"github.com/hashicorp/go-multierror"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/peer"
@ -208,8 +208,8 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return false
}
if syncer.consensus.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
if !syncer.consensus.IsEpochInConsensusRange(fts.TipSet().Height()) {
log.Infof("received block outside of consensus range at height %d", fts.TipSet().Height())
return false
}
@ -228,7 +228,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
// TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of
// the blockstore
if err := syncer.store.PersistTipset(ctx, fts.TipSet()); err != nil {
if err := syncer.store.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
log.Warn("failed to persist incoming block header: ", err)
return false
}
@ -1193,17 +1193,16 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t
span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers))))
if !headers[0].Equals(ts) {
log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids())
return xerrors.Errorf("collectChain synced %s, wanted to sync %s", headers[0].Cids(), ts.Cids())
}
ss.SetStage(api.StagePersistHeaders)
for _, ts := range headers {
if err := syncer.store.PersistTipset(ctx, ts); err != nil {
err = xerrors.Errorf("failed to persist synced tipset to the chainstore: %w", err)
ss.Error(err)
return err
}
// Write tipsets from oldest to newest.
if err := syncer.store.PersistTipsets(ctx, headers); err != nil {
err = xerrors.Errorf("failed to persist synced tipset to the chainstore: %w", err)
ss.Error(err)
return err
}
ss.SetStage(api.StageMessages)

View File

@ -4,8 +4,8 @@ import (
"bytes"
"math/big"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/minio/blake2b-simd"
"golang.org/x/xerrors"
@ -13,8 +13,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/build"
)
type Ticket struct {
@ -195,36 +193,6 @@ func CidArrsContains(a []cid.Cid, b cid.Cid) bool {
return false
}
var blocksPerEpoch = NewInt(build.BlocksPerEpoch)
const sha256bits = 256
func IsTicketWinner(vrfTicket []byte, mypow BigInt, totpow BigInt) bool {
/*
Need to check that
(h(vrfout) + 1) / (max(h) + 1) <= e * myPower / totalPower
max(h) == 2^256-1
which in terms of integer math means:
(h(vrfout) + 1) * totalPower <= e * myPower * 2^256
in 2^256 space, it is equivalent to:
h(vrfout) * totalPower < e * myPower * 2^256
*/
h := blake2b.Sum256(vrfTicket)
lhs := BigFromBytes(h[:]).Int
lhs = lhs.Mul(lhs, totpow.Int)
// rhs = sectorSize * 2^256
// rhs = sectorSize << 256
rhs := new(big.Int).Lsh(mypow.Int, sha256bits)
rhs = rhs.Mul(rhs, blocksPerEpoch.Int)
// h(vrfout) * totalPower < e * sectorSize * 2^256?
return lhs.Cmp(rhs) < 0
}
func (t *Ticket) Equals(ot *Ticket) bool {
return bytes.Equal(t.VRFProof, ot.VRFProof)
}

View File

@ -100,6 +100,7 @@ func polyval(p []*big.Int, x *big.Int) *big.Int {
// computes lambda in Q.256
func lambda(power, totalPower *big.Int) *big.Int {
blocksPerEpoch := NewInt(build.BlocksPerEpoch)
lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0
lam = lam.Lsh(lam, precision) // Q.256
lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256
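
In the notation of the Q.256 comments above, this computes

    lambda = power * BlocksPerEpoch * 2^256 / totalPower

that is, the expected number of blocks this miner wins per epoch, carried as a Q.256 fixed-point value so the later polynomial evaluation keeps its precision.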

View File

@ -238,6 +238,30 @@ func (c *EthCall) UnmarshalJSON(b []byte) error {
return nil
}
type EthSyncingResult struct {
DoneSync bool
StartingBlock EthUint64
CurrentBlock EthUint64
HighestBlock EthUint64
}
func (sr EthSyncingResult) MarshalJSON() ([]byte, error) {
if sr.DoneSync {
// when done syncing, the json response should be '"result": false'
return []byte("false"), nil
}
// need to do an anonymous struct to avoid infinite recursion
return json.Marshal(&struct {
StartingBlock EthUint64 `json:"startingblock"`
CurrentBlock EthUint64 `json:"currentblock"`
HighestBlock EthUint64 `json:"highestblock"`
}{
StartingBlock: sr.StartingBlock,
CurrentBlock: sr.CurrentBlock,
HighestBlock: sr.HighestBlock})
}
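
A minimal sketch of what callers see on the wire, assuming the type is the exported ethtypes.EthSyncingResult this file appears to define; the field values and import path are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

func main() {
	// Fully synced nodes answer eth_syncing with a bare `false`.
	done, _ := json.Marshal(ethtypes.EthSyncingResult{DoneSync: true})
	fmt.Println(string(done)) // false

	// Otherwise the three progress fields are emitted as hex-encoded EthUint64s.
	syncing, _ := json.Marshal(ethtypes.EthSyncingResult{
		StartingBlock: 10, CurrentBlock: 42, HighestBlock: 100,
	})
	fmt.Println(string(syncing)) // {"startingblock":"0xa","currentblock":"0x2a","highestblock":"0x64"}
}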
const (
EthAddressLength = 20
EthHashLength = 32
@ -548,12 +572,12 @@ func (h EthSubscriptionID) String() string {
}
type EthFilterSpec struct {
// Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first,
// Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first,
// "pending" for not yet committed messages.
// Optional, default: "latest".
FromBlock *string `json:"fromBlock,omitempty"`
// Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first,
// Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first,
// "pending" for not yet committed messages.
// Optional, default: "latest".
ToBlock *string `json:"toBlock,omitempty"`

View File

@ -5,8 +5,8 @@ import (
"encoding/json"
"fmt"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -220,12 +220,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
}
// EffectiveGasPremium returns the effective gas premium claimable by the miner
// given the supplied base fee.
// given the supplied base fee. This method is not used anywhere except the Eth API.
//
// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
// specified premium.
// specified premium. Returns 0 if GasFeeCap is less than BaseFee.
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
available := big.Sub(m.GasFeeCap, baseFee)
// It's possible that storage providers may include messages with gasFeeCap less than the baseFee
// In such cases, their reward should be viewed as zero
if available.LessThan(big.NewInt(0)) {
available = big.NewInt(0)
}
if big.Cmp(m.GasPremium, available) <= 0 {
return m.GasPremium
}
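
A small worked example of the clamping, using the real types (the values are illustrative): with a fee cap of 100 and a premium of 30, a base fee of 80 leaves 20 claimable, and a base fee above the cap leaves nothing.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	msg := &types.Message{
		GasFeeCap:  big.NewInt(100),
		GasPremium: big.NewInt(30),
	}
	// Base fee 80: available = 100 - 80 = 20, so the premium clamps to 20.
	fmt.Println(msg.EffectiveGasPremium(big.NewInt(80))) // 20
	// Base fee 120 exceeds the cap: available clamps to 0, so the miner gets 0.
	fmt.Println(msg.EffectiveGasPremium(big.NewInt(120))) // 0
}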

View File

@ -4,8 +4,8 @@ import (
"bytes"
"encoding/json"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"

View File

@ -7,8 +7,8 @@ import (
"io"
"strings"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
block "github.com/ipfs/go-libipfs/blocks"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/abi"

192
chain/vm/execution.go Normal file
View File

@ -0,0 +1,192 @@
package vm
import (
"context"
"os"
"strconv"
"sync"
"github.com/ipfs/go-cid"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/metrics"
)
const (
// DefaultAvailableExecutionLanes is the number of available execution lanes; it is the bound of
// concurrent active executions.
// This is the default value in filecoin-ffi
DefaultAvailableExecutionLanes = 4
// DefaultPriorityExecutionLanes is the number of reserved execution lanes for priority computations.
// This is purely userspace, but we believe it is a reasonable default, even with more available
// lanes.
DefaultPriorityExecutionLanes = 2
)
// the execution environment; see below for definition, methods, and initialization
var execution *executionEnv
// vmExecutor wraps a VM implementation, gating message application behind execution lane tokens.
type vmExecutor struct {
vmi Interface
lane ExecutionLane
}
var _ Interface = (*vmExecutor)(nil)
func newVMExecutor(vmi Interface, lane ExecutionLane) Interface {
return &vmExecutor{vmi: vmi, lane: lane}
}
func (e *vmExecutor) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
token := execution.getToken(e.lane)
defer token.Done()
return e.vmi.ApplyMessage(ctx, cmsg)
}
func (e *vmExecutor) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
token := execution.getToken(e.lane)
defer token.Done()
return e.vmi.ApplyImplicitMessage(ctx, msg)
}
func (e *vmExecutor) Flush(ctx context.Context) (cid.Cid, error) {
return e.vmi.Flush(ctx)
}
type executionToken struct {
lane ExecutionLane
reserved int
}
func (token *executionToken) Done() {
execution.putToken(token)
}
type executionEnv struct {
mx *sync.Mutex
cond *sync.Cond
// available executors
available int
// reserved executors
reserved int
}
func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
metricsUp(metrics.VMExecutionWaiting, lane)
defer metricsDown(metrics.VMExecutionWaiting, lane)
e.mx.Lock()
defer e.mx.Unlock()
switch lane {
case ExecutionLaneDefault:
for e.available <= e.reserved {
e.cond.Wait()
}
e.available--
metricsUp(metrics.VMExecutionRunning, lane)
return &executionToken{lane: lane, reserved: 0}
case ExecutionLanePriority:
for e.available == 0 {
e.cond.Wait()
}
e.available--
reserving := 0
if e.reserved > 0 {
e.reserved--
reserving = 1
}
metricsUp(metrics.VMExecutionRunning, lane)
return &executionToken{lane: lane, reserved: reserving}
default:
// already checked at interface boundary in NewVM, so this is appropriate
panic("bogus execution lane")
}
}
func (e *executionEnv) putToken(token *executionToken) {
e.mx.Lock()
defer e.mx.Unlock()
e.available++
e.reserved += token.reserved
// Note: Signal is unsound, because a priority token could wake up a non-priority
// goroutine and lead to deadlock. So Broadcast it must be.
e.cond.Broadcast()
metricsDown(metrics.VMExecutionRunning, token.lane)
}
func metricsUp(metric *stats.Int64Measure, lane ExecutionLane) {
metricsAdjust(metric, lane, 1)
}
func metricsDown(metric *stats.Int64Measure, lane ExecutionLane) {
metricsAdjust(metric, lane, -1)
}
func metricsAdjust(metric *stats.Int64Measure, lane ExecutionLane, delta int) {
laneName := "default"
if lane > ExecutionLaneDefault {
laneName = "priority"
}
ctx, _ := tag.New(
context.Background(),
tag.Upsert(metrics.ExecutionLane, laneName),
)
stats.Record(ctx, metric.M(int64(delta)))
}
func init() {
var err error
available := DefaultAvailableExecutionLanes
if concurrency := os.Getenv("LOTUS_FVM_CONCURRENCY"); concurrency != "" {
available, err = strconv.Atoi(concurrency)
if err != nil {
panic(err)
}
}
priority := DefaultPriorityExecutionLanes
if reserved := os.Getenv("LOTUS_FVM_CONCURRENCY_RESERVED"); reserved != "" {
priority, err = strconv.Atoi(reserved)
if err != nil {
panic(err)
}
}
// some sanity checks
if available < 2 {
panic("insufficient execution concurrency")
}
if available <= priority {
panic("insufficient default execution concurrency")
}
mx := &sync.Mutex{}
cond := sync.NewCond(mx)
execution = &executionEnv{
mx: mx,
cond: cond,
available: available,
reserved: priority,
}
}
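
To make the accounting concrete, here is a standalone model of the same two-lane semaphore with hypothetical names (the real code uses the unexported executionEnv above): with available=4 and reserved=2, at most two default-lane executions run concurrently, while priority work may also consume the two reserved slots.

package main

import (
	"fmt"
	"sync"
)

// lanes mirrors executionEnv: available counts free executors, of which
// reserved may only be taken by the priority lane.
type lanes struct {
	mu        sync.Mutex
	cond      *sync.Cond
	available int
	reserved  int
}

func newLanes(available, reserved int) *lanes {
	l := &lanes{available: available, reserved: reserved}
	l.cond = sync.NewCond(&l.mu)
	return l
}

func (l *lanes) acquireDefault() {
	l.mu.Lock()
	defer l.mu.Unlock()
	for l.available <= l.reserved { // default work may not touch the reserve
		l.cond.Wait()
	}
	l.available--
}

func (l *lanes) acquirePriority() (tookReserve bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	for l.available == 0 {
		l.cond.Wait()
	}
	l.available--
	if l.reserved > 0 { // prefer burning a reserved slot
		l.reserved--
		tookReserve = true
	}
	return tookReserve
}

func (l *lanes) release(tookReserve bool) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.available++
	if tookReserve {
		l.reserved++
	}
	// Broadcast, not Signal: a Signal could wake a default waiter that still
	// cannot proceed while a priority waiter stays asleep.
	l.cond.Broadcast()
}

func main() {
	l := newLanes(4, 2)
	l.acquireDefault()
	l.acquireDefault()
	// Both unreserved slots are busy; a third default acquire would block,
	// but priority work can still dip into the reserve.
	fmt.Println("priority took reserve:", l.acquirePriority()) // true
}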

View File

@ -7,9 +7,9 @@ import (
"sync/atomic"
"time"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
block "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
cbg "github.com/whyrusleeping/cbor-gen"
@ -250,6 +250,8 @@ type VMOpts struct {
Tracing bool
// ReturnEvents decodes and returns emitted events.
ReturnEvents bool
// ExecutionLane specifies the execution priority of the created vm
ExecutionLane ExecutionLane
}
func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {

View File

@ -2,6 +2,7 @@ package vm
import (
"context"
"fmt"
"os"
cid "github.com/ipfs/go-cid"
@ -17,6 +18,15 @@ var (
StatApplied uint64
)
type ExecutionLane int
const (
// ExecutionLaneDefault signifies a default, non-prioritized execution lane.
ExecutionLaneDefault ExecutionLane = iota
// ExecutionLanePriority signifies a prioritized execution lane with reserved resources.
ExecutionLanePriority
)
type Interface interface {
// Applies the given message onto the VM's current state, returning the result of the execution
ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
@ -33,7 +43,7 @@ type Interface interface {
// Message failures, unexpected terminations, gas costs, etc. should all be ignored.
var useFvmDebug = os.Getenv("LOTUS_FVM_DEVELOPER_DEBUG") == "1"
func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
func makeVM(ctx context.Context, opts *VMOpts) (Interface, error) {
if opts.NetworkVersion >= network.Version16 {
if useFvmDebug {
return NewDualExecutionFVM(ctx, opts)
@ -43,3 +53,18 @@ func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
return NewLegacyVM(ctx, opts)
}
func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
switch opts.ExecutionLane {
case ExecutionLaneDefault, ExecutionLanePriority:
default:
return nil, fmt.Errorf("invalid execution lane: %d", opts.ExecutionLane)
}
vmi, err := makeVM(ctx, opts)
if err != nil {
return nil, err
}
return newVMExecutor(vmi, opts.ExecutionLane), nil
}
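
The lane guard runs before any VM is constructed, so a mostly empty VMOpts is enough to see it fire; a minimal sketch, assuming the usual chain/vm import path:

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/chain/vm"
)

func main() {
	_, err := vm.NewVM(context.Background(), &vm.VMOpts{ExecutionLane: vm.ExecutionLane(42)})
	fmt.Println(err) // invalid execution lane: 42
}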

View File

@ -388,7 +388,7 @@ var ChainSetHeadCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
if cctx.NArg() != 1 {
if !cctx.Bool("genesis") && !cctx.IsSet("epoch") && cctx.NArg() != 1 {
return IncorrectNumArgs(cctx)
}

View File

@ -367,7 +367,7 @@ func AddrInfoFromArg(ctx context.Context, cctx *cli.Context) ([]peer.AddrInfo, e
pis = append(pis, pi)
}
return pis, err
return pis, nil
}
var NetId = &cli.Command{
@ -445,8 +445,8 @@ var NetReachability = &cli.Command{
}
fmt.Println("AutoNAT status: ", i.Reachability.String())
if i.PublicAddr != "" {
fmt.Println("Public address: ", i.PublicAddr)
if len(i.PublicAddrs) > 0 {
fmt.Println("Public address:", i.PublicAddrs)
}
return nil
},

View File

@ -1268,7 +1268,7 @@ var compStateMsg = `
{{end}}
{{if ne .MsgRct.ExitCode 0}}
<div class="error">Error: <pre>{{.Error}}</pre></div>
<div class="error">Exit: <pre>{{.MsgRct.ExitCode}}</pre></div>
{{end}}
<details>
@ -1372,7 +1372,14 @@ func isVerySlow(t time.Duration) bool {
}
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
p, err := stmgr.GetParamType(consensus.NewActorRegistry(), code, method) // todo use api for correct actor registry
ar := consensus.NewActorRegistry()
_, found := ar.Methods[code][method]
if !found {
return fmt.Sprintf("raw:%x", params), nil
}
p, err := stmgr.GetParamType(ar, code, method) // todo use api for correct actor registry
if err != nil {
return "", err
}
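
With this guard, a message whose method number is absent from the registry now renders its params as raw:<hex-of-params> in the compute-state view instead of aborting the whole page with a lookup error.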

View File

@ -35,6 +35,7 @@ import (
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -228,7 +229,7 @@ var importBenchCmd = &cli.Command{
defer cs.Close() //nolint:errcheck
// TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs)
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -7,6 +7,7 @@ import (
"net"
"net/http"
"os"
"strings"
"time"
rice "github.com/GeertJohan/go.rice"
@ -22,6 +23,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
lcli "github.com/filecoin-project/lotus/cli"
)
@ -217,18 +219,34 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
to, err := address.NewFromString(r.FormValue("address"))
if err != nil {
addressInput := r.FormValue("address")
var filecoinAddress address.Address
var decodeError error
if strings.HasPrefix(addressInput, "0x") {
ethAddress, err := ethtypes.ParseEthAddress(addressInput)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
filecoinAddress, decodeError = ethAddress.ToFilecoinAddress()
} else {
filecoinAddress, decodeError = address.NewFromString(addressInput)
}
if decodeError != nil {
http.Error(w, decodeError.Error(), http.StatusBadRequest)
return
}
if to == address.Undef {
if filecoinAddress == address.Undef {
http.Error(w, "empty address", http.StatusBadRequest)
return
}
// Limit based on wallet address
limiter := h.limiter.GetWalletLimiter(to.String())
limiter := h.limiter.GetWalletLimiter(filecoinAddress.String())
if !limiter.Allow() {
http.Error(w, http.StatusText(http.StatusTooManyRequests)+": wallet limit", http.StatusTooManyRequests)
return
@ -282,6 +300,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return

View File

@ -15,7 +15,7 @@
<div class="Index">
<div class="Index-nodes">
<div class="Index-node">
[SENDING FUNDS]
SENDING FUNDS
</div>
<div class="Index-node">
<form action='/send' method='post' id='funds-form'>

View File

@ -8,10 +8,10 @@
<div class="Index">
<div class="Index-nodes">
<div class="Index-node">
[LOTUS DEVNET FAUCET]
LOTUS DEVNET FAUCET
</div>
<div class="Index-node">
<a href="funds.html">[Send Funds]</a>
<a href="funds.html">Send Funds</a>
</div>
<div class="Index-node">
[LOTUS DEVNET GRANT DATACAP]

View File

@ -1,7 +1,6 @@
body {
font-family: 'monospace';
background: #1f1f1f;
color: #f0f0f0;
font-family: 'Helvetica Neue', sans-serif;
background-color: #f0f0f0;
padding: 0;
margin: 0;
}
@ -9,21 +8,22 @@ body {
.Index {
width: 100vw;
height: 100vh;
background: #1a1a1a;
color: #f0f0f0;
font-family: monospace;
background-color: #f0f0f0;
color: #333;
font-family: 'Helvetica Neue', sans-serif;
display: grid;
grid-template-columns: auto 40vw auto;
grid-template-rows: auto auto auto 3em;
grid-template-areas:
". . . ."
". main main ."
". . . ."
"footer footer footer footer";
". . . ."
". main main ."
". . . ."
"footer footer footer footer";
}
.Index-footer {
background: #2a2a2a;
background-color: #333;
grid-area: footer;
}
@ -34,23 +34,49 @@ body {
.Index-nodes {
grid-area: main;
background: #2a2a2a;
}
.Index-node {
margin: 5px;
padding: 15px;
background: #1f1f1f;
background-color: #fff;
box-shadow: 0 0 5px rgba(0, 0, 0, 0.1);
}
span {
display: block;
margin-bottom: 5px;
}
input[type="text"] {
width: 100%;
padding: 10px;
border-radius: 5px;
border: 1px solid #ccc;
margin-bottom: 10px;
}
button {
background-color: #4c9aff;
color: #fff;
border: none;
border-radius: 5px;
padding: 10px 20px;
font-size: 1.2em;
}
button:hover {
background-color: #555;
}
a:link {
color: #50f020;
color: #333;
}
a:visited {
color: #50f020;
color: #333;
}
a:hover {
color: #30a00a;
color: #555;
}

View File

@ -1166,7 +1166,7 @@ var actorConfirmChangeWorker = &cli.Command{
var actorConfirmChangeBeneficiary = &cli.Command{
Name: "confirm-change-beneficiary",
Usage: "Confirm a beneficiary address change",
ArgsUsage: "[minerAddress]",
ArgsUsage: "[minerID]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",

View File

@ -325,14 +325,12 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v1api.Full
}
}
{
fmt.Println()
ws, err := nodeApi.WorkerStats(ctx)
if err != nil {
return xerrors.Errorf("getting worker stats: %w", err)
}
fmt.Println()
ws, err := nodeApi.WorkerStats(ctx)
if err != nil {
fmt.Printf("ERROR: getting worker stats: %s\n", err)
} else {
workersByType := map[string]int{
sealtasks.WorkerSealing: 0,
sealtasks.WorkerWindowPoSt: 0,

Some files were not shown because too many files have changed in this diff.