Merge branch 'master' into raulk/merge-gs-fix

raulk 2021-07-22 23:37:17 +01:00 committed by GitHub
commit 4ffbb03a82
97 changed files with 1802 additions and 698 deletions


@ -735,6 +735,45 @@ jobs:
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
publish-dockerhub:
description: publish to dockerhub
machine:
image: ubuntu-2004:202010-01
parameters:
tag:
type: string
default: latest
steps:
- checkout
- run:
name: dockerhub login
command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin
- run:
name: docker build
command: |
docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
if [[ ! -z $CIRCLE_SHA1 ]]; then
docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
fi
if [[ ! -z $CIRCLE_TAG ]]; then
docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
fi
- run:
name: docker push
command: |
docker push filecoin/lotus:<< parameters.tag >>
docker push filecoin/lotus-all-in-one:<< parameters.tag >>
if [[ ! -z $CIRCLE_SHA1 ]]; then
docker push filecoin/lotus:$CIRCLE_SHA1
docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
fi
if [[ ! -z $CIRCLE_TAG ]]; then
docker push filecoin/lotus:$CIRCLE_TAG
docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
fi
workflows:
version: 2.1
@ -1017,6 +1056,16 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-dockerhub:
name: publish-dockerhub
tag: stable
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
nightly:
triggers:
@ -1030,3 +1079,6 @@ workflows:
- publish-snapcraft:
name: publish-snapcraft-nightly
channel: edge
- publish-dockerhub:
name: publish-dockerhub-nightly
tag: nightly


@ -735,6 +735,45 @@ jobs:
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
publish-dockerhub:
description: publish to dockerhub
machine:
image: ubuntu-2004:202010-01
parameters:
tag:
type: string
default: latest
steps:
- checkout
- run:
name: dockerhub login
command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin
- run:
name: docker build
command: |
docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
fi
- run:
name: docker push
command: |
docker push filecoin/lotus:<< parameters.tag >>
docker push filecoin/lotus-all-in-one:<< parameters.tag >>
if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
docker push filecoin/lotus:$CIRCLE_SHA1
docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker push filecoin/lotus:$CIRCLE_TAG
docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
fi
workflows:
version: 2.1
@ -887,6 +926,16 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-dockerhub:
name: publish-dockerhub
tag: stable
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
nightly:
triggers:
@ -900,3 +949,6 @@ workflows:
- publish-snapcraft:
name: publish-snapcraft-nightly
channel: edge
- publish-dockerhub:
name: publish-dockerhub-nightly
tag: nightly


@ -1,5 +1,38 @@
# Lotus changelog
# 1.10.1 / 2021-07-05
This is an optional but **highly recommended** release of Lotus for lotus miners. It contains many bug fixes and improvements based on feedback from the community since the HyperDrive upgrade.
## New Features
- commit batch: AggregateAboveBaseFee config #6650
- `AggregateAboveBaseFee` is added to the miner sealing configuration to set the network base fee at which proof aggregation kicks in. When the network base fee is lower than this value, prove commits are submitted individually via `ProveCommitSector`. Per the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend that miners set this value to 0.15 nanoFIL (the default) to avoid unexpected aggregation fee burns and to get the most benefit from aggregation; see the sketch below.
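
For illustration, here is a minimal, self-contained Go sketch of the rule this option drives. It is not the actual batcher code; the function name and the sample values are assumptions chosen to match the description above (0.15 nanoFIL = 150,000,000 attoFIL), and the comparison helpers come from `go-state-types/big`.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// useAggregation mirrors the rule described above: aggregate prove-commits via
// ProveCommitAggregate only once the network base fee reaches the configured
// threshold; below it, ProveCommitSector messages are submitted individually.
// Illustrative only, not the actual batcher code.
func useAggregation(baseFee, aggregateAboveBaseFee abi.TokenAmount) bool {
	return baseFee.GreaterThanEqual(aggregateAboveBaseFee)
}

func main() {
	threshold := big.NewInt(150_000_000) // 0.15 nanoFIL, expressed in attoFIL

	fmt.Println(useAggregation(big.NewInt(100_000_000), threshold)) // false -> ProveCommitSector
	fmt.Println(useAggregation(big.NewInt(200_000_000), threshold)) // true  -> ProveCommitAggregate
}
```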
## Bug Fixes
- storage: Fix FinalizeSector with sectors in storage paths #6652
- Fix tiny error in check-client-datacap #6664
- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658
- to optimize the batchwait #6636
- fix getTicket: sector precommitted but expired case #6635
- handleSubmitCommitAggregate() exception handling #6595
- remove precommit check in handleCommitFailed #6634
- ensure agg fee is adequate
- fix: miner balance is not enough, so that ProveCommitAggregate msg exec failed #6623
- commit batch: Initialize the FailedSectors map #6647
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @magik6k | 7 | +151/-56 | 21 |
| @llifezou | 4 | +59/-20 | 4 |
| @johnli-helloworld | 2 | +45/-14 | 4 |
| @wangchao | 1 | +1/-27 | 1 |
| Jerry | 2 | +9/-4 | 2 |
| @zhoutian527 | 1 | +2/-2 | 1 |
| @ribasushi | 1 | +1/-1 | 1 |
# 1.10.0 / 2021-06-23
This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The


@ -36,7 +36,7 @@ WORKDIR /opt/filecoin
ARG RUSTFLAGS=""
ARG GOFLAGS=""
RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats
RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway
FROM ubuntu:20.04 AS base
@ -56,19 +56,173 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc
###
FROM base AS lotus
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY scripts/docker-lotus-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
ENV DOCKER_LOTUS_IMPORT_WALLET ""
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/tmp/filecoin-proof-parameters
USER fc
ENTRYPOINT ["/usr/local/bin/lotus"]
EXPOSE 1234
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
CMD ["-help"]
###
FROM base AS lotus-wallet
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
ENV WALLET_PATH /var/lib/lotus-wallet
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/lib/lotus-wallet
USER fc
EXPOSE 1777
ENTRYPOINT ["/usr/local/bin/lotus-wallet"]
CMD ["-help"]
###
FROM base AS lotus-gateway
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
USER fc
EXPOSE 1234
ENTRYPOINT ["/usr/local/bin/lotus-gateway"]
CMD ["-help"]
###
FROM base AS lotus-miner
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY scripts/docker-lotus-miner-entrypoint.sh /
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV DOCKER_LOTUS_MINER_INIT true
RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus-miner
VOLUME /var/tmp/filecoin-proof-parameters
USER fc
EXPOSE 2345
ENTRYPOINT ["/docker-lotus-miner-entrypoint.sh"]
CMD ["-help"]
###
FROM base AS lotus-worker
MAINTAINER Lotus Development Team
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
RUN mkdir /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-worker
VOLUME /var/lib/lotus-worker
USER fc
EXPOSE 3456
ENTRYPOINT ["/usr/local/bin/lotus-worker"]
CMD ["-help"]
###
FROM base AS lotus-all-in-one
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http
ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1
ENV LOTUS_JAEGER_AGENT_PORT 6831
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http
ENV WALLET_PATH /var/lib/lotus-wallet
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car
ENV DOCKER_LOTUS_MINER_INIT true
COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
EXPOSE 1234
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777


@ -131,6 +131,9 @@ install-miner:
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
install-app:
install -C ./$(APP) /usr/local/bin/$(APP)
# TOOLS
lotus-seed: $(BUILD_DEPS)


@ -4,15 +4,11 @@ import (
"context"
"fmt"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/google/uuid"
"github.com/filecoin-project/go-jsonrpc/auth"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
apitypes "github.com/filecoin-project/lotus/api/types"
)
// MODIFYING THE API INTERFACE
@ -27,55 +23,23 @@ import (
// * Generate openrpc blobs
type Common interface {
// MethodGroup: Auth
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin
// MethodGroup: Net
// MethodGroup: Log
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the nodes total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
// usage and current rate per peer
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// MethodGroup: Common
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read
// Version provides information about API provider
Version(context.Context) (APIVersion, error) //perm:read
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// trigger graceful shutdown
Shutdown(context.Context) error //perm:admin
@ -105,8 +69,3 @@ type APIVersion struct {
func (v APIVersion) String() string {
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
}
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
}


@ -58,6 +58,7 @@ const LookbackNoLimit = abi.ChainEpoch(-1)
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the


@ -45,6 +45,7 @@ type Gateway interface {
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)

api/api_net.go (new file, 66 lines)

@ -0,0 +1,66 @@
package api
import (
"context"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Net interface {
// MethodGroup: Net
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the node's total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the node's bandwidth
// usage and current rate per peer
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the node's bandwidth
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read
}
type CommonNet interface {
Common
Net
}
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
}


@ -41,6 +41,7 @@ import (
// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
Net
ActorAddress(context.Context) (address.Address, error) //perm:read


@ -16,14 +16,10 @@ import (
)
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
var res v0api.CommonStruct
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
requestHeader,
)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -31,11 +27,9 @@ func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header)
// NewFullNodeRPCV0 creates a new http jsonrpc client.
func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
var res v0api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
}, requestHeader)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -44,10 +38,7 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
var res v1api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
}, requestHeader)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -78,15 +69,10 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H
var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
},
requestHeader,
api.GetInternalStructs(&res), requestHeader,
append([]jsonrpc.Option{
rpcenc.ReaderParamEncoder(pushUrl),
}, opts...)...,
)
}, opts...)...)
return &res, closer, err
}
@ -99,9 +85,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(),
@ -115,9 +99,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
var res api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
opts...,
)
@ -129,9 +111,7 @@ func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header
func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
var res v0api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
opts...,
)
@ -142,9 +122,7 @@ func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header
func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
var res api.WalletStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
)


@ -34,7 +34,7 @@ func main() {
doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)
i, _, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
doc.RegisterReceiverName("Filecoin", i)
out, err := doc.Discover()


@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
@ -15,7 +16,7 @@ func main() {
groups := make(map[string]*docgen.MethodGroup)
_, t, permStruct, commonPermStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
_, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
@ -88,13 +89,17 @@ func main() {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
meth, ok := permStruct.FieldByName(m.Name)
if !ok {
meth, ok = commonPermStruct.FieldByName(m.Name)
if !ok {
panic("no perms for method: " + m.Name)
var meth reflect.StructField
var ok bool
for _, ps := range permStruct {
meth, ok = ps.FieldByName(m.Name)
if ok {
break
}
}
if !ok {
panic("no perms for method: " + m.Name)
}
perms := meth.Tag.Get("perm")


@ -266,25 +266,27 @@ func init() {
addExample(map[string]interface{}{"abc": 123})
}
func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) {
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
switch pkg {
case "api": // latest
switch name {
case "FullNode":
i = &api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = reflect.TypeOf(api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "StorageMiner":
i = &api.StorageMinerStruct{}
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = reflect.TypeOf(api.StorageMinerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "Worker":
i = &api.WorkerStruct{}
t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
permStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
default:
panic("unknown type")
}
@ -293,8 +295,9 @@ func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruc
case "FullNode":
i = v0api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
permStruct = reflect.TypeOf(v0api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(v0api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal))
default:
panic("unknown type")
}


@ -16,28 +16,33 @@ const (
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}
func permissionedProxies(in, out interface{}) {
outs := GetInternalStructs(out)
for _, o := range outs {
auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o)
}
}
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
var out StorageMinerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedWorkerAPI(a Worker) Worker {
var out WorkerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
permissionedProxies(a, &out)
return &out
}


@ -35,7 +35,7 @@ import (
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-core/protocol"
xerrors "golang.org/x/xerrors"
)
@ -60,44 +60,10 @@ type CommonStruct struct {
Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"`
ID func(p0 context.Context) (peer.ID, error) `perm:"read"`
LogList func(p0 context.Context) ([]string, error) `perm:"write"`
LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"`
NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"`
NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"`
NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"`
NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"`
NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"`
NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"`
NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"`
NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
Session func(p0 context.Context) (uuid.UUID, error) `perm:"read"`
Shutdown func(p0 context.Context) error `perm:"admin"`
@ -109,9 +75,26 @@ type CommonStruct struct {
type CommonStub struct {
}
type CommonNetStruct struct {
CommonStruct
NetStruct
Internal struct {
}
}
type CommonNetStub struct {
CommonStub
NetStub
}
type FullNodeStruct struct {
CommonStruct
NetStruct
Internal struct {
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@ -473,6 +456,8 @@ type FullNodeStruct struct {
type FullNodeStub struct {
CommonStub
NetStub
}
type GatewayStruct struct {
@ -525,6 +510,8 @@ type GatewayStruct struct {
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"`
StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@ -542,6 +529,47 @@ type GatewayStruct struct {
type GatewayStub struct {
}
type NetStruct struct {
Internal struct {
ID func(p0 context.Context) (peer.ID, error) `perm:"read"`
NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"`
NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"`
NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"`
NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"`
NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"`
NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"`
NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"`
NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
}
}
type NetStub struct {
}
type SignableStruct struct {
Internal struct {
Sign func(p0 context.Context, p1 SignFunc) error ``
@ -554,6 +582,8 @@ type SignableStub struct {
type StorageMinerStruct struct {
CommonStruct
NetStruct
Internal struct {
ActorAddress func(p0 context.Context) (address.Address, error) `perm:"read"`
@ -747,6 +777,8 @@ type StorageMinerStruct struct {
type StorageMinerStub struct {
CommonStub
NetStub
}
type WalletStruct struct {
@ -871,14 +903,6 @@ func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, err
return *new(apitypes.OpenRPCDocument), xerrors.New("method not supported")
}
func (s *CommonStruct) ID(p0 context.Context) (peer.ID, error) {
return s.Internal.ID(p0)
}
func (s *CommonStub) ID(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), xerrors.New("method not supported")
}
func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) {
return s.Internal.LogList(p0)
}
@ -895,134 +919,6 @@ func (s *CommonStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return s.Internal.NetAddrsListen(p0)
}
func (s *CommonStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return s.Internal.NetAgentVersion(p0, p1)
}
func (s *CommonStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return "", xerrors.New("method not supported")
}
func (s *CommonStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return s.Internal.NetAutoNatStatus(p0)
}
func (s *CommonStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return *new(NatInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return s.Internal.NetBandwidthStats(p0)
}
func (s *CommonStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return *new(metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByPeer(p0)
}
func (s *CommonStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return *new(map[string]metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByProtocol(p0)
}
func (s *CommonStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockAdd(p0, p1)
}
func (s *CommonStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockList(p0 context.Context) (NetBlockList, error) {
return s.Internal.NetBlockList(p0)
}
func (s *CommonStub) NetBlockList(p0 context.Context) (NetBlockList, error) {
return *new(NetBlockList), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockRemove(p0, p1)
}
func (s *CommonStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return s.Internal.NetConnect(p0, p1)
}
func (s *CommonStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return s.Internal.NetConnectedness(p0, p1)
}
func (s *CommonStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return *new(network.Connectedness), xerrors.New("method not supported")
}
func (s *CommonStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return s.Internal.NetDisconnect(p0, p1)
}
func (s *CommonStub) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return s.Internal.NetFindPeer(p0, p1)
}
func (s *CommonStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return s.Internal.NetPeerInfo(p0, p1)
}
func (s *CommonStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *CommonStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return s.Internal.NetPeers(p0)
}
func (s *CommonStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return *new([]peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return s.Internal.NetPubsubScores(p0)
}
func (s *CommonStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return *new([]PubsubScore), xerrors.New("method not supported")
}
func (s *CommonStruct) Session(p0 context.Context) (uuid.UUID, error) {
return s.Internal.Session(p0)
}
@ -2663,6 +2559,14 @@ func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey
return *new(apitypes.NetworkVersion), xerrors.New("method not supported")
}
func (s *GatewayStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
return s.Internal.StateReadState(p0, p1, p2)
}
func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
return nil, xerrors.New("method not supported")
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4)
}
@ -2711,6 +2615,142 @@ func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (typ
return *new(types.BigInt), xerrors.New("method not supported")
}
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
return s.Internal.ID(p0)
}
func (s *NetStub) ID(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), xerrors.New("method not supported")
}
func (s *NetStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return s.Internal.NetAddrsListen(p0)
}
func (s *NetStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return s.Internal.NetAgentVersion(p0, p1)
}
func (s *NetStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return "", xerrors.New("method not supported")
}
func (s *NetStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return s.Internal.NetAutoNatStatus(p0)
}
func (s *NetStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return *new(NatInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return s.Internal.NetBandwidthStats(p0)
}
func (s *NetStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return *new(metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByPeer(p0)
}
func (s *NetStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return *new(map[string]metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByProtocol(p0)
}
func (s *NetStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockAdd(p0, p1)
}
func (s *NetStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockList(p0 context.Context) (NetBlockList, error) {
return s.Internal.NetBlockList(p0)
}
func (s *NetStub) NetBlockList(p0 context.Context) (NetBlockList, error) {
return *new(NetBlockList), xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockRemove(p0, p1)
}
func (s *NetStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return s.Internal.NetConnect(p0, p1)
}
func (s *NetStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return s.Internal.NetConnectedness(p0, p1)
}
func (s *NetStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return *new(network.Connectedness), xerrors.New("method not supported")
}
func (s *NetStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return s.Internal.NetDisconnect(p0, p1)
}
func (s *NetStub) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return s.Internal.NetFindPeer(p0, p1)
}
func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return s.Internal.NetPeerInfo(p0, p1)
}
func (s *NetStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *NetStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return s.Internal.NetPeers(p0)
}
func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return *new([]peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return s.Internal.NetPubsubScores(p0)
}
func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return *new([]PubsubScore), xerrors.New("method not supported")
}
func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error {
return s.Internal.Sign(p0, p1)
}
@ -3713,8 +3753,10 @@ func (s *WorkerStub) WaitQuiet(p0 context.Context) error {
var _ ChainIO = new(ChainIOStruct)
var _ Common = new(CommonStruct)
var _ CommonNet = new(CommonNetStruct)
var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct)
var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct)
var _ Wallet = new(WalletStruct)

api/proxy_util.go (new file, 30 lines)

@ -0,0 +1,30 @@
package api
import "reflect"
var _internalField = "Internal"
// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct
func GetInternalStructs(in interface{}) []interface{} {
return getInternalStructs(reflect.ValueOf(in).Elem())
}
func getInternalStructs(rv reflect.Value) []interface{} {
var out []interface{}
internal := rv.FieldByName(_internalField)
ii := internal.Addr().Interface()
out = append(out, ii)
for i := 0; i < rv.NumField(); i++ {
if rv.Type().Field(i).Name == _internalField {
continue
}
sub := getInternalStructs(rv.Field(i))
out = append(out, sub...)
}
return out
}
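
As a quick usage check (illustrative only, not part of the diff): applied to the generated proxy types above, the helper collects one Internal struct per embedded layer, so the new FullNodeStruct, which embeds CommonStruct and NetStruct, yields three receivers for jsonrpc.NewMergeClient:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	// FullNodeStruct embeds CommonStruct and NetStruct in addition to its own
	// Internal struct, so three Internal pointers are collected.
	var proxy api.FullNodeStruct
	internals := api.GetInternalStructs(&proxy)
	fmt.Println(len(internals)) // 3
}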

api/proxy_util_test.go (new file, 62 lines)

@ -0,0 +1,62 @@
package api
import (
"testing"
"github.com/stretchr/testify/require"
)
type StrA struct {
StrB
Internal struct {
A int
}
}
type StrB struct {
Internal struct {
B int
}
}
type StrC struct {
Internal struct {
Internal struct {
C int
}
}
}
func TestGetInternalStructs(t *testing.T) {
var proxy StrA
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 2)
sa := sts[0].(*struct{ A int })
sa.A = 3
sb := sts[1].(*struct{ B int })
sb.B = 4
require.Equal(t, 3, proxy.Internal.A)
require.Equal(t, 4, proxy.StrB.Internal.B)
}
func TestNestedInternalStructs(t *testing.T) {
var proxy StrC
// check that only the top-level internal struct gets picked up
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 1)
sa := sts[0].(*struct {
Internal struct {
C int
}
})
sa.Internal.C = 5
require.Equal(t, 5, proxy.Internal.Internal.C)
}


@ -46,6 +46,7 @@ import (
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the


@ -5,8 +5,15 @@ import (
)
type Common = api.Common
type Net = api.Net
type CommonNet = api.CommonNet
type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type NetStruct = api.NetStruct
type NetStub = api.NetStub
type CommonNetStruct = api.CommonNetStruct
type CommonNetStub = api.CommonNetStub
type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct


@ -30,6 +30,8 @@ import (
type FullNodeStruct struct {
CommonStruct
NetStruct
Internal struct {
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@ -389,6 +391,8 @@ type FullNodeStruct struct {
type FullNodeStub struct {
CommonStub
NetStub
}
type GatewayStruct struct {


@ -54,8 +54,8 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0)
FullAPIVersion0 = newVer(1, 3, 1)
FullAPIVersion1 = newVer(2, 1, 1)
MinerAPIVersion0 = newVer(1, 2, 0)
WorkerAPIVersion0 = newVer(1, 1, 0)


@ -71,6 +71,13 @@ type Config struct {
// which skips moving (as it is a noop, but still takes time to read all the cold objects)
// and directly purges cold blocks.
DiscardColdBlocks bool
// HotStoreMessageRetention indicates the hotstore retention policy for messages.
// It has the following semantics:
// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
// - a positive integer indicates the number of finalities, outside the compaction boundary,
// for which messages will be retained in the hotstore.
HotStoreMessageRetention uint64
}
// ChainAccessor allows the Splitstore to access the chain. It will most likely
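
To make the retention arithmetic concrete, here is a small illustrative sketch (not part of the diff) of how the message-inclusion epoch is derived from this setting, mirroring the doCompact change further below; 900 is build.Finality on mainnet, and the function name is ours:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
)

const finality = abi.ChainEpoch(900) // build.Finality on mainnet

// inclMsgsEpoch mirrors the compaction logic below: messages in blocks at or
// above this epoch are kept in the hotstore; older ones move to the coldstore.
func inclMsgsEpoch(boundaryEpoch abi.ChainEpoch, hotStoreMessageRetention uint64) abi.ChainEpoch {
	var incl abi.ChainEpoch // stays 0 (genesis) when the retention range covers the whole chain
	rng := abi.ChainEpoch(hotStoreMessageRetention) * finality
	if rng < boundaryEpoch {
		incl = boundaryEpoch - rng
	}
	return incl
}

func main() {
	// With the compaction boundary at epoch 100000 and a retention of 2
	// finalities, messages from the 1800 epochs before the boundary (plus
	// everything inside it) are kept hot.
	fmt.Println(inclMsgsEpoch(100000, 2)) // 98200
	fmt.Println(inclMsgsEpoch(100000, 0)) // 100000: only within the compaction boundary
}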
@ -128,6 +135,9 @@ type SplitStore struct {
txnRefsMx sync.Mutex
txnRefs map[cid.Cid]struct{}
txnMissing map[cid.Cid]struct{}
// registered protectors
protectors []func(func(cid.Cid) error) error
}
var _ bstore.Blockstore = (*SplitStore)(nil)
@ -520,6 +530,13 @@ func (s *SplitStore) Start(chain ChainAccessor) error {
return nil
}
func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) {
s.mx.Lock()
defer s.mx.Unlock()
s.protectors = append(s.protectors, protector)
}
func (s *SplitStore) Close() error {
if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
// already closing


@ -345,6 +345,30 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
})
}
func (s *SplitStore) applyProtectors() error {
s.mx.Lock()
defer s.mx.Unlock()
count := 0
for _, protect := range s.protectors {
err := protect(func(c cid.Cid) error {
s.trackTxnRef(c)
count++
return nil
})
if err != nil {
return xerrors.Errorf("error applying protector: %w", err)
}
}
if count > 0 {
log.Infof("protected %d references through %d protectors", count, len(s.protectors))
}
return nil
}
// --- Compaction ---
// Compaction works transactionally with the following algorithm:
// - We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
@ -376,7 +400,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
currentEpoch := curTs.Height()
boundaryEpoch := currentEpoch - CompactionBoundary
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "compactionIndex", s.compactionIndex)
var inclMsgsEpoch abi.ChainEpoch
inclMsgsRange := abi.ChainEpoch(s.cfg.HotStoreMessageRetention) * build.Finality
if inclMsgsRange < boundaryEpoch {
inclMsgsEpoch = boundaryEpoch - inclMsgsRange
}
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
markSet, err := s.markSetEnv.Create("live", s.markSetSize)
if err != nil {
@ -392,13 +422,21 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
// we are ready for concurrent marking
s.beginTxnMarking(markSet)
// 0. track all protected references at beginning of compaction; anything added later should
// be transactionally protected by the write
log.Info("protecting references with registered protectors")
err = s.applyProtectors()
if err != nil {
return err
}
// 1. mark reachable objects by walking the chain from the current epoch; we keep state roots
// and messages until the boundary epoch.
log.Info("marking reachable objects")
startMark := time.Now()
var count int64
err = s.walkChain(curTs, boundaryEpoch, true,
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
@ -593,7 +631,7 @@ func (s *SplitStore) endTxnProtect() {
s.txnMissing = nil
}
func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMsgs bool,
func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
f func(cid.Cid) error) error {
visited := cid.NewSet()
walked := cid.NewSet()
@ -601,6 +639,8 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
walkCnt := 0
scanCnt := 0
stopWalk := func(_ cid.Cid) error { return errStopWalk }
walkBlock := func(c cid.Cid) error {
if !visited.Visit(c) {
return nil
@ -621,10 +661,19 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err)
}
// we only scan the block if it is at or above the boundary
if hdr.Height >= boundary || hdr.Height == 0 {
scanCnt++
if inclMsgs && hdr.Height > 0 {
// messages are retained if within the inclMsgs boundary
if hdr.Height >= inclMsgs && hdr.Height > 0 {
if inclMsgs < inclState {
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, walked, f, stopWalk); err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
} else {
if err := s.walkObject(hdr.Messages, walked, f); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
@ -633,10 +682,14 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
}
}
// state is only retained if within the inclState boundary, with the exception of genesis
if hdr.Height >= inclState || hdr.Height == 0 {
if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
}
scanCnt++
}
if hdr.Height > 0 {


@ -63,6 +63,20 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Fatal(err)
}
// create a garbage block that is protected with a registered protector
protected := blocks.NewBlock([]byte("protected!"))
err = hot.Put(protected)
if err != nil {
t.Fatal(err)
}
// and another one that is not protected
unprotected := blocks.NewBlock([]byte("unprotected!"))
err = hot.Put(unprotected)
if err != nil {
t.Fatal(err)
}
// open the splitstore
ss, err := Open("", ds, hot, cold, cfg)
if err != nil {
@ -70,6 +84,11 @@ func testSplitStore(t *testing.T, cfg *Config) {
}
defer ss.Close() //nolint
// register our protector
ss.AddProtector(func(protect func(cid.Cid) error) error {
return protect(protected.Cid())
})
err = ss.Start(chain)
if err != nil {
t.Fatal(err)
@ -132,8 +151,8 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
}
if hotCnt != 10 {
t.Errorf("expected %d blocks, but got %d", 10, hotCnt)
if hotCnt != 12 {
t.Errorf("expected %d blocks, but got %d", 12, hotCnt)
}
// trigger a compaction
@ -146,12 +165,41 @@ func testSplitStore(t *testing.T, cfg *Config) {
coldCnt = countBlocks(cold)
hotCnt = countBlocks(hot)
if coldCnt != 5 {
t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt)
if coldCnt != 6 {
t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt)
}
if hotCnt != 17 {
t.Errorf("expected %d hot blocks, but got %d", 17, hotCnt)
if hotCnt != 18 {
t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt)
}
// ensure our protected block is still there
has, err := hot.Has(protected.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("protected block is missing from hotstore")
}
// ensure our unprotected block is in the coldstore now
has, err = hot.Has(unprotected.Cid())
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("unprotected block is still in hotstore")
}
has, err = cold.Has(unprotected.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("unprotected block is missing from coldstore")
}
// Make sure we can revert without panicking.


@ -48,7 +48,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
count := int64(0)
xcount := int64(0)
missing := int64(0)
err := s.walkChain(curTs, epoch, false,
err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages/receipts in warmup
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk

3 binary files changed (contents not shown)

@ -28,18 +28,19 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
var UpgradeCalicoHeight = abi.ChainEpoch(-9)
var UpgradePersianHeight = abi.ChainEpoch(-10)
var UpgradeOrangeHeight = abi.ChainEpoch(-11)
var UpgradeClausHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,


@ -28,6 +28,7 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60
const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 119
const UpgradeCalicoHeight = 120
const UpgradePersianHeight = 150
const UpgradeClausHeight = 180


@ -33,6 +33,8 @@ const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 119
const UpgradeCalicoHeight = 120
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)


@ -31,18 +31,19 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
var UpgradeCalicoHeight = abi.ChainEpoch(-9)
var UpgradePersianHeight = abi.ChainEpoch(-10)
var UpgradeOrangeHeight = abi.ChainEpoch(-11)
var UpgradeClausHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,


@ -45,6 +45,7 @@ const UpgradeLiftoffHeight = 148888
const UpgradeKumquatHeight = 170000
const UpgradePricelistOopsHeight = 265199
const UpgradeCalicoHeight = 265200
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)


@ -32,6 +32,7 @@ const UpgradeTapeHeight = 60
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 99
const UpgradeCalicoHeight = 100
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)


@ -82,21 +82,22 @@ var (
UpgradeBreezeHeight abi.ChainEpoch = -1
BreezeGasTampingDuration abi.ChainEpoch = 0
UpgradeSmokeHeight abi.ChainEpoch = -1
UpgradeIgnitionHeight abi.ChainEpoch = -2
UpgradeRefuelHeight abi.ChainEpoch = -3
UpgradeTapeHeight abi.ChainEpoch = -4
UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -7
UpgradePersianHeight abi.ChainEpoch = -8
UpgradeOrangeHeight abi.ChainEpoch = -9
UpgradeClausHeight abi.ChainEpoch = -10
UpgradeTrustHeight abi.ChainEpoch = -11
UpgradeNorwegianHeight abi.ChainEpoch = -12
UpgradeTurboHeight abi.ChainEpoch = -13
UpgradeHyperdriveHeight abi.ChainEpoch = -13
UpgradeSmokeHeight abi.ChainEpoch = -1
UpgradeIgnitionHeight abi.ChainEpoch = -2
UpgradeRefuelHeight abi.ChainEpoch = -3
UpgradeTapeHeight abi.ChainEpoch = -4
UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradePricelistOopsHeight abi.ChainEpoch = -7
UpgradeCalicoHeight abi.ChainEpoch = -8
UpgradePersianHeight abi.ChainEpoch = -9
UpgradeOrangeHeight abi.ChainEpoch = -10
UpgradeClausHeight abi.ChainEpoch = -11
UpgradeTrustHeight abi.ChainEpoch = -12
UpgradeNorwegianHeight abi.ChainEpoch = -13
UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -105,6 +105,7 @@ type State interface {
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
GetAllocatedSectors() (*bitfield.BitField, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)

View File

@ -164,6 +164,7 @@ type State interface {
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
GetAllocatedSectors() (*bitfield.BitField, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)

View File

@ -318,6 +318,15 @@ func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, e
return sectors, nil
}
func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -309,6 +309,15 @@ func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state4) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -25,7 +25,7 @@ func VersionForNetwork(version network.Version) Version {
switch version {
case network.Version0, network.Version1, network.Version2, network.Version3:
return Version0
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
case network.Version4, network.Version5, network.Version6, network.Version6AndAHalf, network.Version7, network.Version8, network.Version9:
return Version2
case network.Version10, network.Version11:
return Version3

View File

@ -5,6 +5,9 @@ import (
"math"
"sync"
"github.com/filecoin-project/lotus/api"
lru "github.com/hashicorp/golang-lru"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/go-state-types/abi"
@ -464,14 +467,20 @@ type messageEvents struct {
lk sync.RWMutex
matchers map[triggerID]MsgMatchFunc
blockMsgLk sync.Mutex
blockMsgCache *lru.ARCCache
}
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
blsMsgCache, _ := lru.NewARC(500)
return messageEvents{
ctx: ctx,
cs: cs,
hcAPI: hcAPI,
matchers: make(map[triggerID]MsgMatchFunc),
ctx: ctx,
cs: cs,
hcAPI: hcAPI,
matchers: make(map[triggerID]MsgMatchFunc),
blockMsgLk: sync.Mutex{},
blockMsgCache: blsMsgCache,
}
}
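The hunk above wires an ARC cache (blockMsgCache) into messageEvents so that repeated lookups for the same block CID are served from memory instead of calling ChainGetBlockMessages again, as the next hunk shows. Below is a standalone sketch of that cache-then-fetch pattern using the same hashicorp ARC cache; the string keys and fetch function are stand-ins, not Lotus types.

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// getCached looks a key up in the ARC cache first and only invokes fetch on a
// miss, storing the result for subsequent callers. Plain strings stand in for
// block CIDs and API results.
func getCached(cache *lru.ARCCache, key string, fetch func(string) (string, error)) (string, error) {
	if v, ok := cache.Get(key); ok {
		return v.(string), nil
	}
	v, err := fetch(key)
	if err != nil {
		return "", err // do not cache failures
	}
	cache.Add(key, v)
	return v, nil
}

func main() {
	cache, _ := lru.NewARC(500)
	calls := 0
	fetch := func(k string) (string, error) {
		calls++
		return "messages for " + k, nil
	}
	_, _ = getCached(cache, "blockA", fetch)
	_, _ = getCached(cache, "blockA", fetch) // second call served from the cache
	fmt.Println("fetch calls:", calls)       // fetch calls: 1
}
```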
@ -515,14 +524,21 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes
seen := map[cid.Cid]struct{}{}
for _, tsb := range ts.Blocks() {
msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
if err != nil {
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
// this is quite bad, but probably better than missing all the other updates
continue
me.blockMsgLk.Lock()
msgsI, ok := me.blockMsgCache.Get(tsb.Cid())
var err error
if !ok {
msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
if err != nil {
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
// this is quite bad, but probably better than missing all the other updates
me.blockMsgLk.Unlock()
continue
}
me.blockMsgCache.Add(tsb.Cid(), msgsI)
}
me.blockMsgLk.Unlock()
msgs := msgsI.(*api.BlockMessages)
for _, m := range msgs.BlsMessages {
_, ok := seen[m.Cid()]
if ok {

View File

@ -6,6 +6,8 @@ import (
"sync"
"testing"
"gotest.tools/assert"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
@ -44,25 +46,43 @@ type fakeCS struct {
tipsets map[types.TipSetKey]*types.TipSet
sub func(rev, app []*types.TipSet)
callNumberLk sync.Mutex
callNumber map[string]int
}
func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1
panic("implement me")
}
func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1
return fcs.tipsets[key], nil
}
func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1
return nil, nil
}
func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1
panic("Not Implemented")
}
func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1
panic("Not Implemented")
}
@ -113,6 +133,10 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
}
func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1
out := make(chan []*api.HeadChange, 1)
best, err := fcs.tsc.best()
if err != nil {
@ -143,6 +167,9 @@ func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error
}
func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api.BlockMessages, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1
messages, ok := fcs.blkMsgs[blk]
if !ok {
return &api.BlockMessages{}, nil
@ -152,8 +179,8 @@ func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api
if !ok {
return &api.BlockMessages{}, nil
}
return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
}
func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid {
@ -233,9 +260,10 @@ var _ EventAPI = &fakeCS{}
func TestAt(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -298,9 +326,10 @@ func TestAt(t *testing.T) {
func TestAtDoubleTrigger(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -340,9 +369,10 @@ func TestAtDoubleTrigger(t *testing.T) {
func TestAtNullTrigger(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -374,9 +404,10 @@ func TestAtNullTrigger(t *testing.T) {
func TestAtNullConf(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -413,9 +444,10 @@ func TestAtNullConf(t *testing.T) {
func TestAtStart(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -447,9 +479,10 @@ func TestAtStart(t *testing.T) {
func TestAtStartConfidence(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -477,9 +510,10 @@ func TestAtStartConfidence(t *testing.T) {
func TestAtChained(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -511,9 +545,10 @@ func TestAtChained(t *testing.T) {
func TestAtChainedConfidence(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -545,9 +580,10 @@ func TestAtChainedConfidence(t *testing.T) {
func TestAtChainedConfidenceNull(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -583,9 +619,10 @@ func TestCalled(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -795,9 +832,10 @@ func TestCalledTimeout(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -835,9 +873,10 @@ func TestCalledTimeout(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -869,9 +908,10 @@ func TestCalledOrder(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -932,9 +972,10 @@ func TestCalledNull(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -997,9 +1038,10 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1087,9 +1129,10 @@ func TestStateChanged(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1175,9 +1218,10 @@ func TestStateChangedRevert(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1253,9 +1297,10 @@ func TestStateChangedTimeout(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1293,9 +1338,10 @@ func TestStateChangedTimeout(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1329,9 +1375,10 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1382,3 +1429,24 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
fcs.advance(9, 1, nil)
}
func TestCachedSameBlock(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
_ = NewEvents(context.Background(), fcs)
fcs.advance(0, 10, map[int]cid.Cid{})
assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got %d", 20, fcs.callNumber["ChainGetBlockMessages"])
fcs.advance(5, 10, map[int]cid.Cid{})
assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got %d", 30, fcs.callNumber["ChainGetBlockMessages"])
}

View File

@ -11,7 +11,6 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
@ -259,8 +258,14 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
Code: api.CheckStatusMessageValidity,
},
}
if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
nv, err := mp.getNtwkVersion(epoch)
if err != nil {
check.OK = false
check.Err = fmt.Sprintf("error retrieving network version: %s", err.Error())
} else {
check.OK = true
}
if err := m.ValidForBlockInclusion(0, nv); err != nil {
check.OK = false
check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error())
} else {
@ -276,7 +281,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
// gas checks
// 4. Min Gas
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByVersion(nv).OnChainMessage(m.ChainLength())
check = api.MessageCheckStatus{
Cid: m.Cid(),

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-cid"
@ -29,6 +30,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -147,6 +149,8 @@ type MessagePool struct {
minGasPrice types.BigInt
getNtwkVersion func(abi.ChainEpoch) (network.Version, error)
currentSize int
// pruneTrigger is a channel used to trigger a mempool pruning
@ -362,26 +366,28 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
if j == nil {
j = journal.NilJournal()
}
us := stmgr.DefaultUpgradeSchedule()
mp := &MessagePool{
ds: ds,
addSema: make(chan struct{}, 1),
closer: make(chan struct{}),
repubTk: build.Clock.Ticker(RepublishInterval),
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet),
keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0),
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache,
sigValCache: verifcache,
changes: lps.New(50),
localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
api: api,
netName: netName,
cfg: cfg,
ds: ds,
addSema: make(chan struct{}, 1),
closer: make(chan struct{}),
repubTk: build.Clock.Ticker(RepublishInterval),
repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet),
keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0),
getNtwkVersion: us.GetNtwkVersion,
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache,
sigValCache: verifcache,
changes: lps.New(50),
localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
api: api,
netName: netName,
cfg: cfg,
evtTypes: [...]journal.EventType{
evtTypeMpoolAdd: j.RegisterEventType("mpool", "add"),
evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"),
@ -426,6 +432,27 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
return mp, nil
}
func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
mp.lk.Lock()
defer mp.lk.Unlock()
for _, mset := range mp.pending {
for _, m := range mset.msgs {
err := f(m.Cid())
if err != nil {
return err
}
err = f(m.Message.Cid())
if err != nil {
return err
}
}
}
return nil
}
func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
// check the cache
a, f := mp.keyCache[addr]
@ -589,8 +616,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// For non local messages, if the message cannot be included in the next 20 blocks it returns
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height()
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength())
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)

View File

@ -105,6 +105,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet)
func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return cid.Undef, nil
}
func (tma *testMpoolAPI) IsLite() bool {
return false
}
@ -286,7 +287,7 @@ func TestCheckMessageBig(t *testing.T) {
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasLimit: 60000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
Params: make([]byte, 41<<10), // 41KiB payload

View File

@ -749,7 +749,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
}
curNonce++
minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength()).Total()
if m.Message.GasLimit < minGas {
break
}

View File

@ -161,6 +161,10 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeKumquatHeight,
Network: network.Version6,
Migration: nil,
}, {
Height: build.UpgradePricelistOopsHeight,
Network: network.Version6AndAHalf,
Migration: nil,
}, {
Height: build.UpgradeCalicoHeight,
Network: network.Version7,
@ -292,6 +296,18 @@ func (us UpgradeSchedule) Validate() error {
return nil
}
func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, error) {
// Traverse from newest to oldest returning upgrade active during epoch e
for i := len(us) - 1; i >= 0; i-- {
u := us[i]
// u.Height is the last epoch before u.Network becomes the active version
if u.Height < e {
return u.Network, nil
}
}
return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e)
}
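GetNtwkVersion walks the schedule from the end because each entry's Height is the last epoch before its Network takes effect; the first entry whose height is strictly below the queried epoch wins. A minimal standalone sketch of that lookup, with hypothetical heights rather than the real build constants:

```go
package main

import "fmt"

// upgrade mirrors the shape of a schedule entry for this sketch only: Height is
// the last epoch before Version becomes active.
type upgrade struct {
	Height  int64
	Version int
}

// versionAt walks the schedule newest-to-oldest and returns the version active
// at the given epoch, or false if the epoch predates every entry.
func versionAt(schedule []upgrade, epoch int64) (int, bool) {
	for i := len(schedule) - 1; i >= 0; i-- {
		if schedule[i].Height < epoch {
			return schedule[i].Version, true
		}
	}
	return 0, false
}

func main() {
	sched := []upgrade{
		{Height: 90, Version: 6},
		{Height: 119, Version: 7},
		{Height: 120, Version: 8},
	}
	fmt.Println(versionAt(sched, 120)) // 7 true  (an epoch equal to a Height still gets the older version)
	fmt.Println(versionAt(sched, 121)) // 8 true
	fmt.Println(versionAt(sched, 1))   // 0 false (before every entry)
}
```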
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error

View File

@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/network"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
@ -121,7 +122,7 @@ func TestForkHeightTriggers(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
@ -250,7 +251,7 @@ func TestForkRefuseCall(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Expensive: true,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -365,7 +366,7 @@ func TestForkPreMigration(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {

View File

@ -1054,14 +1054,15 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
return xerrors.Errorf("failed to load base state tree: %w", err)
}
pl := vm.PricelistByEpoch(baseTs.Height())
nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
pl := vm.PricelistByVersion(nv)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return err
}
@ -1075,7 +1076,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return err

View File

@ -3,12 +3,11 @@ package vm
import (
"fmt"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -80,8 +79,8 @@ type Pricelist interface {
OnVerifyConsensusFault() GasCharge
}
var prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
var prices = map[network.Version]Pricelist{
network.Version0: &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,
@ -130,7 +129,7 @@ var prices = map[abi.ChainEpoch]Pricelist{
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
network.Version6AndAHalf: &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,
@ -208,21 +207,19 @@ var prices = map[abi.ChainEpoch]Pricelist{
},
}
// PricelistByEpoch finds the latest prices for the given epoch
func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
// since we are storing the prices as map or epoch to price
// we need to get the price with the highest epoch that is lower or equal to the `epoch` arg
bestEpoch := abi.ChainEpoch(0)
bestPrice := prices[bestEpoch]
for e, pl := range prices {
// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
if e > bestEpoch && e <= epoch {
bestEpoch = e
// PricelistByVersion finds the latest prices for the given network version
func PricelistByVersion(version network.Version) Pricelist {
bestVersion := network.Version0
bestPrice := prices[bestVersion]
for nv, pl := range prices {
// if `nv > bestVersion` and `nv <= version`
if nv > bestVersion && nv <= version {
bestVersion = nv
bestPrice = pl
}
}
if bestPrice == nil {
panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch))
panic(fmt.Sprintf("bad setup: no gas prices available for version %d", version))
}
return bestPrice
}
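PricelistByVersion replaces the old epoch-keyed lookup: it scans the version-keyed map and keeps the entry with the highest key that does not exceed the requested version. A standalone sketch of that selection rule, with placeholder strings instead of real pricelists:

```go
package main

import "fmt"

// pick returns the value whose key is the highest one not exceeding version,
// mirroring the map scan in PricelistByVersion. The values are placeholders.
func pick(prices map[int]string, version int) (string, bool) {
	bestKey, best, found := -1, "", false
	for k, v := range prices {
		if k > bestKey && k <= version {
			bestKey, best, found = k, v, true
		}
	}
	return best, found
}

func main() {
	// Hypothetical keys: 0 for the genesis pricelist, 7 for a later revision.
	prices := map[int]string{0: "genesis prices", 7: "revised prices"}
	fmt.Println(pick(prices, 3))  // genesis prices true
	fmt.Println(pick(prices, 13)) // revised prices true
}
```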

View File

@ -41,7 +41,7 @@ var EmptyObjectCid cid.Cid
// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
if err := rt.chargeGasSafe(PricelistByVersion(rt.NetworkVersion()).OnCreateActor()); err != nil {
return nil, address.Undef, err
}

View File

@ -135,7 +135,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
gasAvailable: msg.GasLimit,
depth: 0,
numActorsCreated: 0,
pricelist: PricelistByEpoch(vm.blockHeight),
pricelist: PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight)),
allowInternal: true,
callerValidated: false,
executionTrace: types.ExecutionTrace{Msg: msg},
@ -424,7 +424,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
return nil, err
}
pl := PricelistByEpoch(vm.blockHeight)
pl := PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight))
msgGas := pl.OnChainMessage(cmsg.ChainLength())
msgGasCost := msgGas.Total()

View File

@ -446,6 +446,9 @@ var StateExecTraceCmd = &cli.Command{
if err != nil {
return err
}
if lookup == nil {
return fmt.Errorf("failed to find message: %s", mcid)
}
ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet)
if err != nil {
@ -1491,6 +1494,10 @@ var StateSearchMsgCmd = &cli.Command{
return err
}
if mw == nil {
return fmt.Errorf("failed to find message: %s", msg)
}
m, err := api.ChainGetMessage(ctx, msg)
if err != nil {
return err

View File

@ -149,7 +149,7 @@ func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.
return addr, ainfo.AuthHeader(), nil
}
func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
func GetAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) {
ti, ok := ctx.App.Metadata["repoType"]
if !ok {
log.Errorf("unknown repo type, are you sure you want to use GetAPI?")

View File

@ -5,6 +5,7 @@ import (
"os"
"strings"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/fatih/color"
@ -14,6 +15,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@ -41,6 +43,7 @@ var actorCmd = &cli.Command{
actorControl,
actorProposeChangeWorker,
actorConfirmChangeWorker,
actorCompactAllocatedCmd,
},
}
@ -987,3 +990,154 @@ var actorConfirmChangeWorker = &cli.Command{
return nil
},
}
var actorCompactAllocatedCmd = &cli.Command{
Name: "compact-allocated",
Usage: "compact allocated sectors bitfield",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "mask-last-offset",
Usage: "Mask sector IDs from 0 to 'higest_allocated - offset'",
},
&cli.Uint64Flag{
Name: "mask-upto-n",
Usage: "Mask sector IDs from 0 to 'n'",
},
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Bool("really-do-it") {
fmt.Println("Pass --really-do-it to actually execute this action")
return nil
}
if !cctx.Args().Present() {
return fmt.Errorf("must pass address of new owner address")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return err
}
mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api)))
mst, err := miner.Load(store, mact)
if err != nil {
return err
}
allocs, err := mst.GetAllocatedSectors()
if err != nil {
return err
}
var maskBf bitfield.BitField
{
exclusiveFlags := []string{"mask-last-offset", "mask-upto-n"}
hasFlag := false
for _, f := range exclusiveFlags {
if hasFlag && cctx.IsSet(f) {
return xerrors.Errorf("more than one 'mask` flag set")
}
hasFlag = hasFlag || cctx.IsSet(f)
}
}
switch {
case cctx.IsSet("mask-last-offset"):
last, err := allocs.Last()
if err != nil {
return err
}
m := cctx.Uint64("mask-last-offset")
if last <= m+1 {
return xerrors.Errorf("highest allocated sector lower than mask offset %d: %d", m+1, last)
}
// security check to avoid bricking a miner
if last > 1<<60 {
return xerrors.Errorf("very high last sector number, refusing to mask: %d", last)
}
maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{{Val: true, Len: last - m}}})
if err != nil {
return xerrors.Errorf("forming bitfield: %w", err)
}
case cctx.IsSet("mask-upto-n"):
n := cctx.Uint64("mask-upto-n")
maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{{Val: true, Len: n}}})
if err != nil {
return xerrors.Errorf("forming bitfield: %w", err)
}
default:
return xerrors.Errorf("no 'mask' flags set")
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
params := &miner2.CompactSectorNumbersParams{
MaskSectorNumbers: maskBf,
}
sp, err := actors.SerializeParams(params)
if err != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Worker,
To: maddr,
Method: miner.Methods.CompactSectorNumbers,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Println("CompactSectorNumbers Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return err
}
// check it executed successfully
if wait.Receipt.ExitCode != 0 {
fmt.Println("Propose owner change failed!")
return err
}
return nil
},
}
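The mask the command builds is an RLE+ bitfield: a single run of set bits starting at sector 0. The sketch below constructs one with the same go-bitfield helpers and inspects it; the length of 10 is arbitrary and nothing here sends a message on chain.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

func main() {
	// A run of 10 set bits masks sector numbers 0..9 (length chosen only for the sketch).
	mask, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
		Runs: []rlepluslazy.Run{{Val: true, Len: 10}},
	})
	if err != nil {
		panic(err)
	}
	count, _ := mask.Count()
	last, _ := mask.Last()
	fmt.Println(count, last) // 10 9
}
```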

View File

@ -337,6 +337,9 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
}
if msgInfo == nil {
return nil, nil, nil, fmt.Errorf("failed to locate message: not found")
}
log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)

docker-compose.yaml Normal file
View File

@ -0,0 +1,145 @@
# By default, this docker-compose file will start a lotus fullnode
#
# Some directives have been left commented out so they serve as an
# example for more advanced use.
#
# To provide a custom configuration file, or automatically import
# a wallet, uncomment the "configs" or "secrets" sections.
#
# start on a single node:
#
# docker-compose up
#
# start on docker swarm:
#
# docker swarm init (if you haven't already)
# docker stack deploy -c docker-compose.yaml mylotuscluster
#
# for more information, please visit docs.filecoin.io
version: "3.8"
volumes:
parameters:
lotus-repo:
lotus-miner-repo:
lotus-worker-repo:
configs:
lotus-config-toml:
file: /path/to/lotus/config.toml
lotus-miner-config-toml:
file: /path/to/lotus-miner/config.toml
secrets:
lotus-wallet:
file: /path/to/exported/lotus/wallet
services:
lotus:
build:
context: .
target: lotus
dockerfile: Dockerfile.lotus
image: filecoin/lotus
volumes:
- parameters:/var/tmp/filecoin-proof-parameters
- lotus-repo:/var/lib/lotus
ports:
- 1234:1234
environment:
- LOTUS_JAEGER_AGENT_HOST=jaeger
- LOTUS_JAEGER_AGENT_PORT=6831
# - DOCKER_LOTUS_IMPORT_WALLET=/tmp/wallet
deploy:
restart_policy:
condition: on-failure
delay: 30s
# configs:
# - source: lotus-config-toml
# target: /var/lib/lotus/config.toml
# secrets:
# - source: lotus-wallet
# target: /tmp/wallet
command:
- daemon
lotus-gateway:
build:
context: .
target: lotus-gateway
dockerfile: Dockerfile.lotus
image: filecoin/lotus-gateway
depends_on:
- lotus
ports:
- 1235:1234
environment:
- FULLNODE_API_INFO=/dns/lotus/tcp/1234/http
- LOTUS_JAEGER_AGENT_HOST=jaeger
- LOTUS_JAEGER_AGENT_PORT=6831
deploy:
restart_policy:
condition: on-failure
delay: 30s
command:
- run
#
# Uncomment to run miner software
#
# lotus-miner:
# build:
# context: .
# target: lotus-miner
# dockerfile: Dockerfile.lotus
# image: filecoin/lotus-miner
# volumes:
# - parameters:/var/tmp/filecoin-proof-parameters
# - lotus-miner-repo:/var/lib/lotus-miner
# depends_on:
# - lotus
# ports:
# - 2345:2345
# environment:
# - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http
# - LOTUS_JAEGER_AGENT_HOST=jaeger
# - LOTUS_JAEGER_AGENT_PORT=6831
# deploy:
# restart_policy:
# condition: on-failure
# delay: 30s
# configs:
# - source: lotus-miner-config-toml
# target: /var/lib/lotus-miner/config.toml
# command:
# - run
# lotus-worker:
# build:
# context: .
# target: lotus-worker
# dockerfile: Dockerfile.lotus
# image: filecoin/lotus-worker
# volumes:
# - parameters:/var/tmp/filecoin-proof-parameters
# - lotus-worker-repo:/var/lib/lotus-worker
# depends_on:
# - lotus-miner
# environment:
# - MINER_API_INFO=/dns/lotus-miner/tcp/1234/http
# - LOTUS_JAEGER_AGENT_HOST=jaeger
# - LOTUS_JAEGER_AGENT_PORT=6831
# deploy:
# restart_policy:
# condition: on-failure
# delay: 30s
# replicas: 2
# command:
# - run
jaeger:
image: jaegertracing/all-in-one
ports:
- "6831:6831/udp"
- "16686:16686"
deploy:
restart_policy:
condition: on-failure
delay: 30s

View File

@ -199,7 +199,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```

View File

@ -144,7 +144,7 @@ Perms: admin
Inputs: `null`
Response: `131328`
Response: `131329`
## Add

View File

@ -280,7 +280,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```
@ -4634,7 +4634,7 @@ Inputs:
]
```
Response: `13`
Response: `1300`
### StateReadState
StateReadState returns the indicated actor's state.

View File

@ -282,7 +282,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```
@ -4855,7 +4855,7 @@ Inputs:
]
```
Response: `13`
Response: `1300`
### StateReadState
StateReadState returns the indicated actor's state.

View File

@ -207,6 +207,7 @@ COMMANDS:
control Manage control addresses
propose-change-worker Propose a worker address change
confirm-change-worker Confirm a worker address change
compact-allocated compact allocated sectors bitfield
help, h Shows a list of commands or help for one command
OPTIONS:
@ -361,6 +362,22 @@ OPTIONS:
```
### lotus-miner actor compact-allocated
```
NAME:
lotus-miner actor compact-allocated - compact allocated sectors bitfield
USAGE:
lotus-miner actor compact-allocated [command options] [arguments...]
OPTIONS:
--mask-last-offset value Mask sector IDs from 0 to 'highest_allocated - offset' (default: 0)
--mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0)
--really-do-it Actually send transaction performing the action (default: false)
--help, -h show help (default: false)
```
## lotus-miner info
```
NAME:

View File

@ -69,6 +69,10 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err)
}
if lookup == nil {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: not found", publishCid)
}
if lookup.Receipt.ExitCode != exitcode.Ok {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode)
}

View File

@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/require"
)
var errNotFound = errors.New("Could not find")
var errNotFound = errors.New("could not find")
func TestGetCurrentDealInfo(t *testing.T) {
ctx := context.Background()
@ -180,6 +180,12 @@ func TestGetCurrentDealInfo(t *testing.T) {
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: something went wrong", dummyCid),
},
"search message not found": {
publishCid: dummyCid,
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: not found", dummyCid),
},
"return code not ok": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{

View File

@ -358,8 +358,11 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
}
params, pcd, tok, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
return err
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)})
}
if params == nil {
return nil // event was sent in preCommitParams
}
deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd)
@ -403,9 +406,12 @@ func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector Se
}
params, deposit, _, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)})
}
if params == nil {
return nil // event was sent in preCommitParams
}
res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params)
if err != nil {

go.mod
View File

@ -40,7 +40,7 @@ require (
github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
github.com/filecoin-project/go-statestore v0.1.1
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
@ -48,7 +48,7 @@ require (
github.com/filecoin-project/specs-actors/v2 v2.3.5
github.com/filecoin-project/specs-actors/v3 v3.1.1
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.2
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1

go.sum
View File

@ -305,8 +305,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124 h1:veGrNABg/9I7prngrowkhwbvW5d5JN55MNKmbsr5FqA=
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
@ -331,8 +332,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP
github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo=
github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE=
github.com/filecoin-project/specs-actors/v5 v5.0.2 h1:pLNFUt9xtFuhrgZZ0tPnzGchAVu4koyCRIopzkx/OP0=
github.com/filecoin-project/specs-actors/v5 v5.0.2/go.mod h1:E0yeEl6Scl6eWeeWmxwQsAufvOAC72H6ELyh2Y62H90=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=

View File

@ -121,6 +121,7 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) {
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
require.NoError(t, err)
require.NotNil(t, searchRes)
require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}

View File

@ -635,7 +635,7 @@ func (n *Ensemble) InterconnectAll() *Ensemble {
}
// Connect connects one full node to the provided full nodes.
func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble {
func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
addr, err := from.NetAddrsListen(context.Background())
require.NoError(n.t, err)

View File

@ -213,12 +213,18 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sched := kit.DefaultTestUpgradeSchedule
lastUpgradeHeight := sched[len(sched)-1].Height
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
build.UpgradeClausHeight = lastUpgradeHeight + 1
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
// Wait till all upgrades are done and we've passed the clause epoch.
client.WaitTillChain(ctx, kit.HeightAtLeast(build.UpgradeClausHeight+1))
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -268,6 +274,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
// Ideally we'd be a bit more precise here, but getting the information we need from the
// test framework is more work than it's worth.
//
// We just need to wait till all upgrades are done.
client.WaitTillChain(ctx, kit.HeightAtLeast(20))
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)

View File

@ -78,27 +78,38 @@ func ReaderParamEncoder(addr string) jsonrpc.Option {
})
}
type waitReadCloser struct {
// watchReadCloser watches the ReadCloser and closes the watch channel when
// either: (1) the ReadCloser fails on Read (including with a benign error
// like EOF), or (2) when Close is called.
//
// Use it to be notified of terminal states, in situations where a Read failure
// (or EOF) is considered a terminal state too (besides Close).
type watchReadCloser struct {
io.ReadCloser
wait chan struct{}
watch chan struct{}
closeOnce sync.Once
}
func (w *waitReadCloser) Read(p []byte) (int, error) {
func (w *watchReadCloser) Read(p []byte) (int, error) {
n, err := w.ReadCloser.Read(p)
if err != nil {
close(w.wait)
w.closeOnce.Do(func() {
close(w.watch)
})
}
return n, err
}
func (w *waitReadCloser) Close() error {
close(w.wait)
func (w *watchReadCloser) Close() error {
w.closeOnce.Do(func() {
close(w.watch)
})
return w.ReadCloser.Close()
}
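The sync.Once is what makes this rename more than cosmetic: with the old waitReadCloser, a Read that hit EOF followed by a later Close would close the channel twice and panic. A minimal, self-contained sketch of the pattern (not the rpcenc types) showing both paths firing safely:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// watched closes its watch channel exactly once, whether the terminal event is
// a Read error (EOF counts) or an explicit Close, so waiters can select on it.
type watched struct {
	io.ReadCloser
	watch     chan struct{}
	closeOnce sync.Once
}

func (w *watched) done() { w.closeOnce.Do(func() { close(w.watch) }) }

func (w *watched) Read(p []byte) (int, error) {
	n, err := w.ReadCloser.Read(p)
	if err != nil {
		w.done() // EOF or a real error: terminal either way
	}
	return n, err
}

func (w *watched) Close() error {
	w.done()
	return w.ReadCloser.Close()
}

func main() {
	w := &watched{ReadCloser: io.NopCloser(strings.NewReader("hi")), watch: make(chan struct{})}
	_, _ = io.ReadAll(w) // reads to EOF, which closes watch
	_ = w.Close()        // safe: sync.Once prevents a double-close panic
	<-w.watch
	fmt.Println("terminal state observed")
}
```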
func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
var readersLk sync.Mutex
readers := map[uuid.UUID]chan *waitReadCloser{}
readers := map[uuid.UUID]chan *watchReadCloser{}
hnd := func(resp http.ResponseWriter, req *http.Request) {
strId := path.Base(req.URL.Path)
@ -111,14 +122,14 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
readersLk.Lock()
ch, found := readers[u]
if !found {
ch = make(chan *waitReadCloser)
ch = make(chan *watchReadCloser)
readers[u] = ch
}
readersLk.Unlock()
wr := &waitReadCloser{
wr := &watchReadCloser{
ReadCloser: req.Body,
wait: make(chan struct{}),
watch: make(chan struct{}),
}
tctx, cancel := context.WithTimeout(req.Context(), Timeout)
@ -134,7 +145,9 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
}
select {
case <-wr.wait:
case <-wr.watch:
// TODO should we check if we failed the Read, and if so
// return an HTTP 500? i.e. turn watch into a chan error?
case <-req.Context().Done():
log.Errorf("context error in reader stream handler (2): %v", req.Context().Err())
resp.WriteHeader(500)
@ -167,7 +180,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
readersLk.Lock()
ch, found := readers[u]
if !found {
ch = make(chan *waitReadCloser)
ch = make(chan *watchReadCloser)
readers[u] = ch
}
readersLk.Unlock()

View File

@ -54,7 +54,7 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min
func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
si, err := rpn.sectorsStatus(ctx, sectorID, true)
si, err := rpn.sectorsStatus(ctx, sectorID, false)
if err != nil {
return nil, err
}

View File

@ -25,6 +25,7 @@ import (
)
func TestDealPublisher(t *testing.T) {
t.Skip("this test randomly fails in various subtests; see issue #6799")
testCases := []struct {
name string
publishPeriod time.Duration

View File

@ -11,54 +11,54 @@ import (
func MetricedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
var out api.StorageMinerStruct
proxy(a, &out.Internal)
proxy(a, &out.CommonStruct.Internal)
proxy(a, &out)
return &out
}
func MetricedFullAPI(a api.FullNode) api.FullNode {
var out api.FullNodeStruct
proxy(a, &out.Internal)
proxy(a, &out.CommonStruct.Internal)
proxy(a, &out)
return &out
}
func MetricedWorkerAPI(a api.Worker) api.Worker {
var out api.WorkerStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func MetricedWalletAPI(a api.Wallet) api.Wallet {
var out api.WalletStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func MetricedGatewayAPI(a api.Gateway) api.Gateway {
var out api.GatewayStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func proxy(in interface{}, out interface{}) {
rint := reflect.ValueOf(out).Elem()
ra := reflect.ValueOf(in)
func proxy(in interface{}, outstr interface{}) {
outs := api.GetInternalStructs(outstr)
for _, out := range outs {
rint := reflect.ValueOf(out).Elem()
ra := reflect.ValueOf(in)
for f := 0; f < rint.NumField(); f++ {
field := rint.Type().Field(f)
fn := ra.MethodByName(field.Name)
rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
ctx := args[0].Interface().(context.Context)
// upsert function name into context
ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name))
stop := Timer(ctx, APIRequestDuration)
defer stop()
// pass tagged ctx back into function call
args[0] = reflect.ValueOf(ctx)
return fn.Call(args)
}))
for f := 0; f < rint.NumField(); f++ {
field := rint.Type().Field(f)
fn := ra.MethodByName(field.Name)
rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
ctx := args[0].Interface().(context.Context)
// upsert function name into context
ctx, _ = tag.New(ctx, tag.Upsert(Endpoint, field.Name))
stop := Timer(ctx, APIRequestDuration)
defer stop()
// pass tagged ctx back into function call
args[0] = reflect.ValueOf(ctx)
return fn.Call(args)
}))
}
}
}
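The reshaped proxy iterates every internal struct returned by api.GetInternalStructs and fills each function-typed field with a metrics-wrapping closure. The standalone sketch below shows the underlying reflect.MakeFunc technique on a hypothetical one-method API; the print statement stands in for the tagging and timing done above.

```go
package main

import (
	"fmt"
	"reflect"
)

// impl is a hypothetical concrete API implementation.
type impl struct{}

func (impl) Greet(name string) string { return "hello " + name }

// stub mirrors the struct-of-functions shape that the proxy fills in.
type stub struct {
	Greet func(string) string
}

// fill wraps every method of in into the matching function field of out,
// running extra logic (here just a print) before delegating to the method.
func fill(out interface{}, in interface{}) {
	rint := reflect.ValueOf(out).Elem()
	ra := reflect.ValueOf(in)
	for f := 0; f < rint.NumField(); f++ {
		field := rint.Type().Field(f)
		fn := ra.MethodByName(field.Name)
		rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value {
			fmt.Println("calling", field.Name) // stand-in for metrics tagging/timing
			return fn.Call(args)
		}))
	}
}

func main() {
	var s stub
	fill(&s, impl{})
	fmt.Println(s.Greet("lotus")) // prints "calling Greet" then "hello lotus"
}
```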

View File

@ -6,6 +6,7 @@ import (
"os"
"time"
"github.com/filecoin-project/lotus/node/impl/net"
metricsi "github.com/ipfs/go-metrics-interface"
"github.com/filecoin-project/lotus/api"
@ -36,7 +37,6 @@ import (
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/common/mock"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
@ -245,11 +245,11 @@ func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option {
}),
ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied
If(!enableLibp2pNode,
Override(new(common.NetAPI), From(new(mock.MockNetAPI))),
Override(new(api.Net), new(api.NetStub)),
Override(new(api.Common), From(new(common.CommonAPI))),
),
If(enableLibp2pNode,
Override(new(common.NetAPI), From(new(common.Libp2pNetAPI))),
Override(new(api.Net), From(new(net.NetAPI))),
Override(new(api.Common), From(new(common.CommonAPI))),
Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)),
Override(ConnectionManagerKey, lp2p.ConnectionManager(
@ -312,12 +312,14 @@ func Repo(r repo.Repo) Option {
Override(new(dtypes.BasicStateBlockstore), modules.StateSplitBlockstore),
Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))),
Override(new(dtypes.ExposedBlockstore), modules.ExposedSplitBlockstore),
Override(new(dtypes.GCReferenceProtector), modules.SplitBlockstoreGCReferenceProtector),
),
If(!cfg.EnableSplitstore,
Override(new(dtypes.BasicChainBlockstore), modules.ChainFlatBlockstore),
Override(new(dtypes.BasicStateBlockstore), modules.StateFlatBlockstore),
Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))),
Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))),
Override(new(dtypes.GCReferenceProtector), modules.NoopGCReferenceProtector),
),
Override(new(dtypes.ChainBlockstore), From(new(dtypes.BasicChainBlockstore))),

View File

@ -251,6 +251,8 @@ type Splitstore struct {
ColdStoreType string
HotStoreType string
MarkSetType string
HotStoreMessageRetention uint64
}
// // Full Node

View File

@ -436,7 +436,19 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid)
if piece != nil && !piece.Equals(*p.PieceCID) {
continue
}
out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{}))
// do not rely on local data with respect to peer id
// fetch an up-to-date miner peer id from chain
mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK)
if err != nil {
return nil, err
}
pp := rm.RetrievalPeer{
Address: p.Address,
ID: *mi.PeerId,
}
out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{}))
}
return out, nil

View File

@ -10,8 +10,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@ -21,8 +21,6 @@ var session = uuid.New()
type CommonAPI struct {
fx.In
NetAPI
APISecret *dtypes.APIAlg
ShutdownChan dtypes.ShutdownChan
}
@ -48,6 +46,24 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt
return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret))
}
func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}
func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}
return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,
BlockDelay: build.BlockDelaySecs,
}, nil
}
func (a *CommonAPI) LogList(context.Context) ([]string, error) {
return logging.GetSubsystems(), nil
}
@ -68,17 +84,3 @@ func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) {
func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil // relies on jsonrpc closing
}
func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}
return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,
BlockDelay: build.BlockDelaySecs,
}, nil
}

View File

@ -1,101 +0,0 @@
package mock
import (
"context"
"errors"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"go.uber.org/fx"
)
var (
errNotImplemented = errors.New("not implemented")
)
type MockNetAPI struct {
fx.In
}
func (a *MockNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
return "", errNotImplemented
}
func (a *MockNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (conn network.Connectedness, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return errNotImplemented
}
func (a *MockNetAPI) NetAddrsListen(context.Context) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return errNotImplemented
}
func (a *MockNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBandwidthStats(ctx context.Context) (s metrics.Stats, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) ID(context.Context) (p peer.ID, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}
func (a *MockNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}
func (a *MockNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
err = errNotImplemented
return
}

View File

@ -1,34 +0,0 @@
package common
import (
"context"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
)
type NetAPI interface {
NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error)
NetPubsubScores(context.Context) ([]api.PubsubScore, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error)
NetConnect(ctx context.Context, p peer.AddrInfo) error
NetAddrsListen(context.Context) (peer.AddrInfo, error)
NetDisconnect(ctx context.Context, p peer.ID) error
NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error)
NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error)
NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error)
ID(context.Context) (peer.ID, error)
NetBlockAdd(ctx context.Context, acl api.NetBlockList) error
NetBlockRemove(ctx context.Context, acl api.NetBlockList) error
NetBlockList(ctx context.Context) (api.NetBlockList, error)
}

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/impl/market"
"github.com/filecoin-project/lotus/node/impl/net"
"github.com/filecoin-project/lotus/node/impl/paych"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
@ -23,6 +24,7 @@ var log = logging.Logger("node")
type FullNodeAPI struct {
common.CommonAPI
net.NetAPI
full.ChainAPI
client.API
full.MpoolAPI

View File

@ -1,4 +1,4 @@
package common
package net
import (
"context"
@ -14,7 +14,7 @@ import (
var cLog = logging.Logger("conngater")
func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
func (a *NetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.BlockPeer(p)
if err != nil {
@ -89,7 +89,7 @@ func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) er
return nil
}
func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
func (a *NetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.UnblockPeer(p)
if err != nil {
@ -124,7 +124,7 @@ func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList)
return nil
}
func (a *Libp2pNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
func (a *NetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
result.Peers = a.ConnGater.ListBlockedPeers()
for _, ip := range a.ConnGater.ListBlockedAddrs() {
result.IPAddrs = append(result.IPAddrs, ip.String())

View File

@ -1,29 +1,28 @@
package common
package net
import (
"context"
"sort"
"strings"
"go.uber.org/fx"
"github.com/libp2p/go-libp2p-core/host"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-core/protocol"
swarm "github.com/libp2p/go-libp2p-swarm"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/fx"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
)
type Libp2pNetAPI struct {
type NetAPI struct {
fx.In
RawHost lp2p.RawHost
@ -34,11 +33,15 @@ type Libp2pNetAPI struct {
Sk *dtypes.ScoreKeeper
}
func (a *Libp2pNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
func (a *NetAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}
func (a *NetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return a.Host.Network().Connectedness(pid), nil
}
func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
func (a *NetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
scores := a.Sk.Get()
out := make([]api.PubsubScore, len(scores))
i := 0
@ -54,7 +57,7 @@ func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, erro
return out, nil
}
func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
func (a *NetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
conns := a.Host.Network().Conns()
out := make([]peer.AddrInfo, len(conns))
@ -70,7 +73,7 @@ func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
return out, nil
}
func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
func (a *NetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
info := &api.ExtendedPeerInfo{ID: p}
agent, err := a.Host.Peerstore().Get(p, "AgentVersion")
@ -101,7 +104,7 @@ func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedP
return info, nil
}
func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
func (a *NetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
if swrm, ok := a.Host.Network().(*swarm.Swarm); ok {
swrm.Backoff().Clear(p.ID)
}
@ -109,22 +112,22 @@ func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return a.Host.Connect(ctx, p)
}
func (a *Libp2pNetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
func (a *NetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
return peer.AddrInfo{
ID: a.Host.ID(),
Addrs: a.Host.Addrs(),
}, nil
}
func (a *Libp2pNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
func (a *NetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return a.Host.Network().ClosePeer(p)
}
func (a *Libp2pNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
func (a *NetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
return a.Router.FindPeer(ctx, p)
}
func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
func (a *NetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat()
if autonat == nil {
@ -148,7 +151,7 @@ func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err
}, nil
}
func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
func (a *NetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
ag, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err != nil {
return "", err
@ -161,11 +164,11 @@ func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string,
return ag.(string), nil
}
func (a *Libp2pNetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
return a.Reporter.GetBandwidthTotals(), nil
}
func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
out := make(map[string]metrics.Stats)
for p, s := range a.Reporter.GetBandwidthByPeer() {
out[p.String()] = s
@ -173,14 +176,8 @@ func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]
return out, nil
}
func (a *Libp2pNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return a.Reporter.GetBandwidthByProtocol(), nil
}
func (a *Libp2pNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}
func (a *Libp2pNetAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}
var _ api.Net = &NetAPI{}

View File

@ -17,6 +17,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -38,7 +39,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sectorblocks"
@ -46,7 +46,10 @@ import (
)
type StorageMinerAPI struct {
common.CommonAPI
fx.In
api.Common
api.Net
Full api.FullNode
LocalStore *stores.Local

View File

@ -78,8 +78,9 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
}
cfg := &splitstore.Config{
MarkSetType: cfg.Splitstore.MarkSetType,
DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard",
MarkSetType: cfg.Splitstore.MarkSetType,
DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard",
HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention,
}
ss, err := splitstore.Open(path, ds, hot, cold, cfg)
if err != nil {
@ -95,6 +96,14 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
}
}
func SplitBlockstoreGCReferenceProtector(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.GCReferenceProtector {
return s.(dtypes.GCReferenceProtector)
}
func NoopGCReferenceProtector(_ fx.Lifecycle) dtypes.GCReferenceProtector {
return dtypes.NoopGCReferenceProtector{}
}
func ExposedSplitBlockstore(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.ExposedBlockstore {
return s.(*splitstore.SplitStore).Expose()
}

View File

@ -58,7 +58,7 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty
return blockservice.New(bs, rem)
}
func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) {
func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) {
mp, err := messagepool.New(mpp, ds, nn, j)
if err != nil {
return nil, xerrors.Errorf("constructing mpool: %w", err)
@ -68,6 +68,7 @@ func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS
return mp.Close()
},
})
protector.AddProtector(mp.ForEachPendingMessage)
return mp, nil
}

View File

@ -0,0 +1,13 @@
package dtypes
import (
cid "github.com/ipfs/go-cid"
)
type GCReferenceProtector interface {
AddProtector(func(func(cid.Cid) error) error)
}
type NoopGCReferenceProtector struct{}
func (p NoopGCReferenceProtector) AddProtector(func(func(cid.Cid) error) error) {}
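The GCReferenceProtector interface above lets components hand the blockstore a walk function over CIDs that must not be garbage-collected; the MessagePool hunk above registers mp.ForEachPendingMessage through exactly this hook. A minimal sketch of a protector that collects the protected CIDs during a GC pass (sketchProtector and protectAll are illustrative, not the splitstore implementation):

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
)

// GCReferenceProtector mirrors the interface added in the diff.
type GCReferenceProtector interface {
	AddProtector(func(func(cid.Cid) error) error)
}

// sketchProtector remembers the registered walk functions so a GC pass can
// ask each of them to enumerate the CIDs it needs kept alive.
type sketchProtector struct {
	protectors []func(func(cid.Cid) error) error
}

func (p *sketchProtector) AddProtector(fn func(func(cid.Cid) error) error) {
	p.protectors = append(p.protectors, fn)
}

// protectAll runs every registered walk function and records the reported
// CIDs in a protected set that the GC would then treat as roots.
func (p *sketchProtector) protectAll(protected map[cid.Cid]struct{}) error {
	for _, walk := range p.protectors {
		if err := walk(func(c cid.Cid) error {
			protected[c] = struct{}{}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	p := &sketchProtector{}
	// In the diff, the message pool registers mp.ForEachPendingMessage here;
	// this stand-in walk function reports no CIDs.
	p.AddProtector(func(f func(cid.Cid) error) error { return nil })

	protected := make(map[cid.Cid]struct{})
	if err := p.protectAll(protected); err != nil {
		fmt.Println("protect failed:", err)
	}
	fmt.Println("protected CIDs:", len(protected))
}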

View File

@ -327,6 +327,21 @@ func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain
return
}
//
// Tri-state environment variable LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC
// - unset == the default (currently fsync enabled)
// - set with a false-y value == fsync enabled no matter what a future default is
// - set with any other value == fsync is disabled regardless of the default (recommended for day-to-day use)
//
if nosyncBs, nosyncBsSet := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC"); nosyncBsSet {
nosyncBs = strings.ToLower(nosyncBs)
if nosyncBs == "" || nosyncBs == "0" || nosyncBs == "false" || nosyncBs == "no" {
opts.SyncWrites = true
} else {
opts.SyncWrites = false
}
}
bs, err := badgerbs.Open(opts)
if err != nil {
fsr.bsErr = err

View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
if [ ! -z "$DOCKER_LOTUS_IMPORT_SNAPSHOT" ]; then
GATE="$LOTUS_PATH"/date_initialized
# Don't init if already initialized.
if [ ! -f "$GATE" ]; then
echo importing minimal snapshot
/usr/local/bin/lotus daemon --import-snapshot "$DOCKER_LOTUS_IMPORT_SNAPSHOT" --halt-after-import
# Block future inits
date > "$GATE"
fi
fi
# import wallet, if provided
if [ ! -z "$DOCKER_LOTUS_IMPORT_WALLET" ]; then
/usr/local/bin/lotus-shed keyinfo import "$DOCKER_LOTUS_IMPORT_WALLET"
fi
exec /usr/local/bin/lotus "$@"

View File

@ -0,0 +1,19 @@
#!/usr/bin/env bash
if [ ! -z "$DOCKER_LOTUS_MINER_INIT" ]; then
GATE="$LOTUS_PATH"/date_initialized
# Don't init if already initialized.
if [ -f "GATE" ]; then
echo lotus-miner already initialized.
exit 0
fi
echo starting init
/usr/local/bin/lotus-miner init
# Block future inits
date > "$GATE"
fi
exec /usr/local/bin/lotus-miner "$@"

View File

@ -134,7 +134,7 @@ func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnC
LastErr: info.LastErr,
Log: log,
// on chain info
SealProof: 0,
SealProof: info.SectorType,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),

View File

@ -659,6 +659,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
if !bytes.Equal(checkRand, rand) {
log.Warnw("windowpost randomness changed", "old", rand, "new", checkRand, "ts-height", ts.Height(), "challenge-height", di.Challenge, "tsk", ts.Key())
rand = checkRand
continue
}