Merge remote-tracking branch 'origin/master' into feat/post-worker
commit 046a9f8af0
@@ -44,13 +44,13 @@ commands:
     - restore_cache:
         name: Restore parameters cache
         keys:
-          - 'v25-2k-lotus-params'
+          - 'v26-2k-lotus-params'
         paths:
          - /var/tmp/filecoin-proof-parameters/
     - run: ./lotus fetch-params 2048
     - save_cache:
         name: Save parameters cache
-        key: 'v25-2k-lotus-params'
+        key: 'v26-2k-lotus-params'
        paths:
         - /var/tmp/filecoin-proof-parameters/
  install_ipfs:
@@ -390,7 +390,7 @@ jobs:
 
  build-appimage:
    machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
    steps:
      - checkout
      - attach_workspace:
@@ -398,6 +398,16 @@ jobs:
      - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
+
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -705,6 +715,17 @@ jobs:
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+  publish-packer-snap:
+    description: build packer image with snap. mainnet only.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus-snap.pkr.hcl
  publish-dockerhub:
    description: publish to dockerhub
    machine:
@@ -785,6 +806,11 @@ workflows:
          suite: itest-deals_512mb
          target: "./itests/deals_512mb_test.go"
 
+      - test:
+          name: test-itest-deals_anycid
+          suite: itest-deals_anycid
+          target: "./itests/deals_anycid_test.go"
+
      - test:
          name: test-itest-deals_concurrent
          suite: itest-deals_concurrent
@@ -850,6 +876,11 @@ workflows:
          suite: itest-get_messages_in_ts
          target: "./itests/get_messages_in_ts_test.go"
 
+      - test:
+          name: test-itest-mempool
+          suite: itest-mempool
+          target: "./itests/mempool_test.go"
+
      - test:
          name: test-itest-multisig
          suite: itest-multisig
@@ -986,10 +1017,19 @@ workflows:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-appimage:
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish:
          requires:
            - build-all
            - build-macos
+            - build-appimage
          filters:
            branches:
              ignore:
@@ -1068,3 +1108,13 @@ workflows:
      - publish-dockerhub:
          name: publish-dockerhub-nightly
          tag: nightly
+  monthly:
+    triggers:
+      - schedule:
+          cron: "0 0 1 * *"
+          filters:
+            branches:
+              only:
+                - master
+    jobs:
+      - publish-packer-snap
@@ -44,13 +44,13 @@ commands:
     - restore_cache:
         name: Restore parameters cache
         keys:
-          - 'v25-2k-lotus-params'
+          - 'v26-2k-lotus-params'
         paths:
          - /var/tmp/filecoin-proof-parameters/
     - run: ./lotus fetch-params 2048
     - save_cache:
         name: Save parameters cache
-        key: 'v25-2k-lotus-params'
+        key: 'v26-2k-lotus-params'
        paths:
         - /var/tmp/filecoin-proof-parameters/
  install_ipfs:
@@ -390,7 +390,7 @@ jobs:
 
  build-appimage:
    machine:
-      image: ubuntu-2004:202104-01
+      image: ubuntu-2004:202111-02
    steps:
      - checkout
      - attach_workspace:
@@ -398,6 +398,16 @@ jobs:
      - run:
          name: install appimage-builder
          command: |
+            # appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
+            # that pass sound devices from the host to the testing container. (hard coded!)
+            # https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
+            # Circleci doesn't provide a working sound device; this is enough to fake it.
+            if [ ! -e /dev/snd ]
+            then
+              sudo mkdir /dev/snd
+              sudo mknod /dev/snd/ControlC0 c 1 2
+            fi
+
            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
            sudo apt update
            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
@@ -705,6 +715,17 @@ jobs:
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+  publish-packer-snap:
+    description: build packer image with snap. mainnet only.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus-snap.pkr.hcl
  publish-dockerhub:
    description: publish to dockerhub
    machine:
@@ -816,10 +837,19 @@ workflows:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-appimage:
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish:
          requires:
            - build-all
            - build-macos
+            - build-appimage
          filters:
            branches:
              ignore:
@@ -898,3 +928,13 @@ workflows:
      - publish-dockerhub:
          name: publish-dockerhub-nightly
          tag: nightly
+  monthly:
+    triggers:
+      - schedule:
+          cron: "0 0 1 * *"
+          filters:
+            branches:
+              only:
+                - master
+    jobs:
+      - publish-packer-snap
.gitignore (vendored, 1 change)
@@ -40,6 +40,7 @@ build/paramfetch.sh
 /bundle
 /darwin
 /linux
+*.snap
 
 *-fuzz.zip
 /chain/types/work_msg/
@@ -49,23 +49,23 @@ AppDir:
     fedora:
       image: appimagecrafters/tests-env:fedora-30
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     debian:
       image: appimagecrafters/tests-env:debian-stable
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     arch:
       image: appimagecrafters/tests-env:archlinux-latest
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     centos:
       image: appimagecrafters/tests-env:centos-7
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
     ubuntu:
       image: appimagecrafters/tests-env:ubuntu-xenial
       command: ./AppRun
-      use_host_x: true
+      use_host_x: false
 AppImage:
   arch: x86_64
   update-information: guess
CHANGELOG.md (101 changes)
@@ -1,5 +1,106 @@
 # Lotus changelog
 
+# 1.14.2 / 2022-02-24
+
+This is an **optional** release of lotus that has a couple more improvements w.r.t. the Snap experience for storage providers, in preparation for the [upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).
+
+Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s).
+
+Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out!
+
+## Bug Fixes
+- fix lotus-bench for sealing jobs (#8173)
+- fix:sealing:really-do-it flag for abort upgrade (#8181)
+- fix:proving:post check sector handles snap deals replica faults (#8177)
+- fix: sealing: missing file type (#8180)
+
+## Others
+- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong
+  commit and fixed that by a force push, which is a really bad practice since it messes up the go mod. Therefore,
+  we want to retract it and users may use v1.14.1 and above.
+
+## Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| @zenground0 | 2 | +73/-58 | 12 |
+| @eben.xie | 1 | +7/-0 | 1 |
+| @jennijuju | 1 | +4/-0 | 1 |
+| @jennijuju | 1 | +2/-1 | 1 |
+| @ribasushi | 1 | +2/-0 | 1 |
+
+# 1.14.1 / 2022-02-18
+
+This is an **optional** release of lotus that fixes the incorrect *comment* on the network v15 OhSnap upgrade **date**. Note that the actual upgrade epoch in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct.
+
+# 1.14.0 / 2022-02-17
+
+This is a MANDATORY release of Lotus that introduces [Filecoin network v15,
+codenamed the OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).
+
+The network is scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to this release (or a later release) before that time. Storage providers must update their daemons, miners, and worker(s).
+
+The OhSnap upgrade introduces the following FIPs, delivered in [actors v7](https://github.com/filecoin-project/specs-actors/releases/tag/v7.0.0):
+- [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md)
+- [FIP-0028 Remove Datacap from Verified clients](https://github.com/filecoin-project/FIPs/pull/226)
+
+It is recommended that storage providers download the new params before updating their node, miner, and workers. To do so:
+
+- Download Lotus v1.14.0 or later
+- run `make lotus-shed`
+- run `./lotus-shed fetch-params` with the appropriate `proving-params` flag
+- Upgrade the Lotus daemon and miner **when the previous step is complete**
+
+All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
+
+## New Features and Changes
+- Integrate actor v7-rc1:
+  - Integrate v7 actors ([#7617](https://github.com/filecoin-project/lotus/pull/7617))
+  - feat: state: Fast migration for v15 ([#7933](https://github.com/filecoin-project/lotus/pull/7933))
+  - fix: blockstore: Add missing locks to autobatch::Get() ([#7939](https://github.com/filecoin-project/lotus/pull/7939))
+  - correctness fixes for the autobatch blockstore ([#7940](https://github.com/filecoin-project/lotus/pull/7940))
+- Implement and support [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md)
+  - chore: deps: Integrate proof v11.0.0 ([#7923](https://github.com/filecoin-project/lotus/pull/7923))
+  - Snap Deals Lotus Integration: FSM Posting and integration test ([#7810](https://github.com/filecoin-project/lotus/pull/7810))
+  - Feat/sector storage unseal ([#7730](https://github.com/filecoin-project/lotus/pull/7730))
+  - Feat/snap deals storage ([#7615](https://github.com/filecoin-project/lotus/pull/7615))
+  - fix: sealing: Add more deal expiration checks during PRU pipeline ([#7871](https://github.com/filecoin-project/lotus/pull/7871))
+  - chore: deps: Update go-paramfetch ([#7917](https://github.com/filecoin-project/lotus/pull/7917))
+  - feat: #7880 gas: add gas charge for VerifyReplicaUpdate ([#7897](https://github.com/filecoin-project/lotus/pull/7897))
+  - enhancement: sectors: disable existing cc upgrade path 2 days before the upgrade epoch ([#7900](https://github.com/filecoin-project/lotus/pull/7900))
+
+## Improvements
+- updating to new datastore/blockstore code with contexts ([#7646](https://github.com/filecoin-project/lotus/pull/7646))
+- reorder transfer checks so as to ensure sending 2B FIL to yourself fails if you don't have that amount ([#7637](https://github.com/filecoin-project/lotus/pull/7637))
+- VM: Circ supply should be constant per epoch ([#7811](https://github.com/filecoin-project/lotus/pull/7811))
+
+## Bug Fixes
+- Fix: state: circsuypply calc around null blocks ([#7890](https://github.com/filecoin-project/lotus/pull/7890))
+- Mempool msg selection should respect block message limits ([#7321](https://github.com/filecoin-project/lotus/pull/7321))
+- SplitStore: supress compaction near upgrades ([#7734](https://github.com/filecoin-project/lotus/pull/7734))
+
+## Others
+- chore: create pull_request_template.md ([#7726](https://github.com/filecoin-project/lotus/pull/7726))
+
+## Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| Aayush Rajasekaran | 41 | +5538/-1205 | 189 |
+| zenground0 | 11 | +3316/-524 | 124 |
+| Jennifer Wang | 29 | +714/-599 | 68 |
+| ZenGround0 | 3 | +263/-25 | 11 |
+| c r | 2 | +198/-30 | 6 |
+| vyzo | 4 | +189/-7 | 7 |
+| Aayush | 11 | +146/-48 | 49 |
+| web3-bot | 10 | +99/-17 | 10 |
+| Steven Allen | 1 | +55/-37 | 1 |
+| Jiaying Wang | 5 | +30/-8 | 5 |
+| Jakub Sztandera | 2 | +8/-3 | 3 |
+| Łukasz Magiera | 1 | +3/-3 | 2 |
+| Travis Person | 1 | +2/-2 | 2 |
+| Rod Vagg | 1 | +2/-2 | 2 |
+
 # v1.13.2 / 2022-01-09
 
 Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like
Makefile (2 changes)
@@ -345,6 +345,8 @@ gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci
 	@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
 .PHONY: gen
 
+jen: gen
+
 snap: lotus lotus-miner lotus-worker
 	snapcraft
 	# snapcraft upload ./lotus_*.snap
@@ -689,7 +689,17 @@ type FullNode interface {
 	// MethodGroup: Paych
 	// The Paych methods are for interacting with and managing payment channels
 
-	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
+	// PaychGet gets or creates a payment channel between address pair
+	// The specified amount will be reserved for use. If there aren't enough non-reserved funds
+	// available, funds will be added through an on-chain message.
+	// - When opts.OffChain is true, this call will not cause any messages to be sent to the chain (no automatic
+	// channel creation/funds adding). If the operation can't be performed without sending a message an error will be
+	// returned. Note that even when this option is specified, this call can be blocked by previous operations on the
+	// channel waiting for on-chain operations.
+	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt, opts PaychGetOpts) (*ChannelInfo, error) //perm:sign
+	// PaychFund gets or creates a payment channel between address pair.
+	// The specified amount will be added to the channel through on-chain send for future use
+	PaychFund(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
 	PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
 	PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
 	PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
@@ -828,6 +838,10 @@ const (
 	PCHOutbound
 )
 
+type PaychGetOpts struct {
+	OffChain bool
+}
+
 type PaychStatus struct {
 	ControlAddr address.Address
 	Direction   PCHDir
@@ -845,16 +859,23 @@ type ChannelAvailableFunds struct {
 	From address.Address
 	// To is the to address of the channel
 	To address.Address
-	// ConfirmedAmt is the amount of funds that have been confirmed on-chain
-	// for the channel
+
+	// ConfirmedAmt is the total amount of funds that have been confirmed on-chain for the channel
 	ConfirmedAmt types.BigInt
 	// PendingAmt is the amount of funds that are pending confirmation on-chain
 	PendingAmt types.BigInt
+
+	// NonReservedAmt is part of ConfirmedAmt that is available for use (e.g. when the payment channel was pre-funded)
+	NonReservedAmt types.BigInt
+	// PendingAvailableAmt is the amount of funds that are pending confirmation on-chain that will become available once confirmed
+	PendingAvailableAmt types.BigInt
+
 	// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
 	// confirmation of pending funds
 	PendingWaitSentinel *cid.Cid
 	// QueuedAmt is the amount that is queued up behind a pending request
 	QueuedAmt types.BigInt
+
 	// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
 	// and in the local datastore
 	VoucherReedeemedAmt types.BigInt
|
||||
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
|
||||
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
|
||||
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
|
||||
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
|
||||
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
|
||||
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
|
||||
MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
|
||||
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
||||
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
|
||||
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
|
||||
|
@@ -51,6 +51,10 @@ type Net interface {
 	NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
 	NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
 
+	NetProtectAdd(ctx context.Context, acl []peer.ID) error //perm:admin
+	NetProtectRemove(ctx context.Context, acl []peer.ID) error //perm:admin
+	NetProtectList(ctx context.Context) ([]peer.ID, error) //perm:read
+
 	// ResourceManager API
 	NetStat(ctx context.Context, scope string) (NetStat, error) //perm:read
 	NetLimit(ctx context.Context, scope string) (NetLimit, error) //perm:read
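A brief, hypothetical usage sketch for the new NetProtect calls (not part of this diff): protect a peer so it is exempt from connection pruning, then list the protected set. The function name and wiring are assumptions; `node` is any implementation of `api.Net`.

```go
package netdemo

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/api"
)

// protectPeer pins a peer via the admin-permission NetProtectAdd endpoint,
// then reads back the protected set with the read-permission NetProtectList.
func protectPeer(ctx context.Context, node api.Net, p peer.ID) error {
	if err := node.NetProtectAdd(ctx, []peer.ID{p}); err != nil {
		return err
	}
	protected, err := node.NetProtectList(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%d protected peers\n", len(protected))
	return nil
}
```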
@@ -112,6 +112,8 @@ type StorageMiner interface {
 	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
 	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
 	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
+	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
+	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
 
 	// WorkerConnect tells the node to connect to workers RPC
 	WorkerConnect(context.Context, string) error //perm:admin retry:true
@@ -129,6 +131,7 @@ type StorageMiner interface {
 	ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
 	ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
 	ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 	ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
@@ -218,6 +221,16 @@ type StorageMiner interface {
 	// DagstoreGC runs garbage collection on the DAG store.
 	DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
 
+	// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
+	// so they can download its index
+	IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin
+
+	// IndexerAnnounceAllDeals informs the indexer nodes about all active deals.
+	IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin
+
+	// DagstoreLookupPieces returns information about shards that contain the given CID.
+	DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin
+
 	// RuntimeSubsystems returns the subsystems that are enabled
 	// in this instance.
 	RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
@@ -252,7 +265,7 @@ type StorageMiner interface {
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin
 
-	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
 
 	ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
 }
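For the extended CheckProvable signature, here is a minimal, hypothetical calling sketch (not part of this diff). The reading that `update` is pairwise with `sectors` and flags sectors that went through the snap-deal replica-update path is an assumption based on the surrounding snap-deals changes; the function name and setup are illustrative.

```go
package minerdemo

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/api"
)

// checkSectors calls the new five-argument CheckProvable. Each entry of
// snapped corresponds to the sector at the same index in sectors; the final
// bool is the pre-existing "expensive" check toggle.
func checkSectors(ctx context.Context, miner api.StorageMiner, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, snapped []bool) error {
	bad, err := miner.CheckProvable(ctx, pp, sectors, snapped, false)
	if err != nil {
		return err
	}
	for num, reason := range bad {
		fmt.Printf("sector %d not provable: %s\n", num, reason)
	}
	return nil
}
```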
@@ -40,6 +40,7 @@ type Worker interface {
 	SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
 	SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
 	FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+	FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
 	ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
 	ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
@@ -1856,6 +1856,49 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
 }
 
+// NetProtectAdd mocks base method.
+func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// NetProtectAdd indicates an expected call of NetProtectAdd.
+func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1)
+}
+
+// NetProtectList mocks base method.
+func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectList", arg0)
+	ret0, _ := ret[0].([]peer.ID)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NetProtectList indicates an expected call of NetProtectList.
+func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0)
+}
+
+// NetProtectRemove mocks base method.
+func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// NetProtectRemove indicates an expected call of NetProtectRemove.
+func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1)
+}
+
 // NetPubsubScores mocks base method.
 func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
 	m.ctrl.T.Helper()
@@ -1975,19 +2018,34 @@ func (mr *MockFullNodeMockRecorder) PaychCollect(arg0, arg1 interface{}) *gomock
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychCollect", reflect.TypeOf((*MockFullNode)(nil).PaychCollect), arg0, arg1)
 }
 
-// PaychGet mocks base method.
-func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
+// PaychFund mocks base method.
+func (m *MockFullNode) PaychFund(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int) (*api.ChannelInfo, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3)
+	ret := m.ctrl.Call(m, "PaychFund", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(*api.ChannelInfo)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
 
+// PaychFund indicates an expected call of PaychFund.
+func (mr *MockFullNodeMockRecorder) PaychFund(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychFund", reflect.TypeOf((*MockFullNode)(nil).PaychFund), arg0, arg1, arg2, arg3)
+}
+
+// PaychGet mocks base method.
+func (m *MockFullNode) PaychGet(arg0 context.Context, arg1, arg2 address.Address, arg3 big.Int, arg4 api.PaychGetOpts) (*api.ChannelInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PaychGet", arg0, arg1, arg2, arg3, arg4)
+	ret0, _ := ret[0].(*api.ChannelInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
 // PaychGet indicates an expected call of PaychGet.
-func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+func (mr *MockFullNodeMockRecorder) PaychGet(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychGet", reflect.TypeOf((*MockFullNode)(nil).PaychGet), arg0, arg1, arg2, arg3, arg4)
 }
 
 // PaychGetWaitReady mocks base method.
api/proxy_gen.go (159 changes)
@@ -306,7 +306,9 @@ type FullNodeStruct struct {
 
 	PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
 
-	PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"`
+	PaychFund func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) `perm:"sign"`
+
+	PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) `perm:"sign"`
 
 	PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
 
@@ -516,6 +518,8 @@ type GatewayStruct struct {
 
 	MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``
 
+	MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) ``
+
 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
 
 	StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``
@@ -593,6 +597,12 @@ type NetStruct struct {
 
 	NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
 
+	NetProtectAdd func(p0 context.Context, p1 []peer.ID) error `perm:"admin"`
+
+	NetProtectList func(p0 context.Context) ([]peer.ID, error) `perm:"read"`
+
+	NetProtectRemove func(p0 context.Context, p1 []peer.ID) error `perm:"admin"`
+
 	NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
 
 	NetSetLimit func(p0 context.Context, p1 string, p2 NetLimit) error `perm:"admin"`
@@ -625,7 +635,7 @@ type StorageMinerStruct struct {
 
 	ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
 
-	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
+	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
 
 	ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
 
@@ -639,6 +649,8 @@ type StorageMinerStruct struct {
 
 	DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"`
 
+	DagstoreLookupPieces func(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) `perm:"admin"`
+
 	DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"`
 
 	DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`
@@ -673,6 +685,10 @@ type StorageMinerStruct struct {
 
 	DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"`
 
+	IndexerAnnounceAllDeals func(p0 context.Context) error `perm:"admin"`
+
+	IndexerAnnounceDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
+
 	MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
 
 	MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"`
@@ -723,6 +739,8 @@ type StorageMinerStruct struct {
 
 	ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
+	ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
 	ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
 
 	ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
@@ -755,6 +773,8 @@ type StorageMinerStruct struct {
 
 	SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
 
+	SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
 	SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
 
 	SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@@ -872,6 +892,8 @@ type WorkerStruct struct {
 
 	Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
 
+	FinalizeReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
 	FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
 
 	GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
@@ -2183,14 +2205,25 @@ func (s *FullNodeStub) PaychCollect(p0 context.Context, p1 address.Address) (cid
 	return *new(cid.Cid), ErrNotSupported
 }
 
-func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+func (s *FullNodeStruct) PaychFund(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+	if s.Internal.PaychFund == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.PaychFund(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) PaychFund(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) {
 	if s.Internal.PaychGet == nil {
 		return nil, ErrNotSupported
 	}
-	return s.Internal.PaychGet(p0, p1, p2, p3)
+	return s.Internal.PaychGet(p0, p1, p2, p3, p4)
 }
 
-func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*ChannelInfo, error) {
+func (s *FullNodeStub) PaychGet(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 PaychGetOpts) (*ChannelInfo, error) {
 	return nil, ErrNotSupported
 }
 
@@ -3283,6 +3316,17 @@ func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 t
 	return *new(types.BigInt), ErrNotSupported
 }
 
+func (s *GatewayStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
+	if s.Internal.MsigGetVestingSchedule == nil {
+		return *new(MsigVesting), ErrNotSupported
+	}
+	return s.Internal.MsigGetVestingSchedule(p0, p1, p2)
+}
+
+func (s *GatewayStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
+	return *new(MsigVesting), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
 	if s.Internal.StateAccountKey == nil {
 		return *new(address.Address), ErrNotSupported
@@ -3668,6 +3712,39 @@ func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
 	return *new([]peer.AddrInfo), ErrNotSupported
 }
 
+func (s *NetStruct) NetProtectAdd(p0 context.Context, p1 []peer.ID) error {
+	if s.Internal.NetProtectAdd == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.NetProtectAdd(p0, p1)
+}
+
+func (s *NetStub) NetProtectAdd(p0 context.Context, p1 []peer.ID) error {
+	return ErrNotSupported
+}
+
+func (s *NetStruct) NetProtectList(p0 context.Context) ([]peer.ID, error) {
+	if s.Internal.NetProtectList == nil {
+		return *new([]peer.ID), ErrNotSupported
+	}
+	return s.Internal.NetProtectList(p0)
+}
+
+func (s *NetStub) NetProtectList(p0 context.Context) ([]peer.ID, error) {
+	return *new([]peer.ID), ErrNotSupported
+}
+
+func (s *NetStruct) NetProtectRemove(p0 context.Context, p1 []peer.ID) error {
+	if s.Internal.NetProtectRemove == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.NetProtectRemove(p0, p1)
+}
+
+func (s *NetStub) NetProtectRemove(p0 context.Context, p1 []peer.ID) error {
+	return ErrNotSupported
+}
+
 func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
 	if s.Internal.NetPubsubScores == nil {
 		return *new([]PubsubScore), ErrNotSupported
@@ -3745,14 +3822,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
 	return *new(abi.SectorSize), ErrNotSupported
 }
 
-func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
 	if s.Internal.CheckProvable == nil {
 		return *new(map[abi.SectorNumber]string), ErrNotSupported
 	}
-	return s.Internal.CheckProvable(p0, p1, p2, p3)
+	return s.Internal.CheckProvable(p0, p1, p2, p3, p4)
 }
 
-func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
+func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
 	return *new(map[abi.SectorNumber]string), ErrNotSupported
 }
 
@@ -3822,6 +3899,17 @@ func (s *StorageMinerStub) DagstoreListShards(p0 context.Context) ([]DagstoreSha
 	return *new([]DagstoreShardInfo), ErrNotSupported
 }
 
+func (s *StorageMinerStruct) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
+	if s.Internal.DagstoreLookupPieces == nil {
+		return *new([]DagstoreShardInfo), ErrNotSupported
+	}
+	return s.Internal.DagstoreLookupPieces(p0, p1)
+}
+
+func (s *StorageMinerStub) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
+	return *new([]DagstoreShardInfo), ErrNotSupported
+}
+
 func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error {
 	if s.Internal.DagstoreRecoverShard == nil {
 		return ErrNotSupported
@@ -4009,6 +4097,28 @@ func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []ci
 	return ErrNotSupported
 }
 
+func (s *StorageMinerStruct) IndexerAnnounceAllDeals(p0 context.Context) error {
+	if s.Internal.IndexerAnnounceAllDeals == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.IndexerAnnounceAllDeals(p0)
+}
+
+func (s *StorageMinerStub) IndexerAnnounceAllDeals(p0 context.Context) error {
+	return ErrNotSupported
+}
+
+func (s *StorageMinerStruct) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
+	if s.Internal.IndexerAnnounceDeal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.IndexerAnnounceDeal(p0, p1)
+}
+
+func (s *StorageMinerStub) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
 	if s.Internal.MarketCancelDataTransfer == nil {
 		return ErrNotSupported
@@ -4284,6 +4394,17 @@ func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID,
 	return ErrNotSupported
 }
 
+func (s *StorageMinerStruct) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	if s.Internal.ReturnFinalizeReplicaUpdate == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ReturnFinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
 	if s.Internal.ReturnFinalizeSector == nil {
 		return ErrNotSupported
@@ -4460,6 +4581,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
 	return nil, ErrNotSupported
 }
 
+func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	if s.Internal.SectorAbortUpgrade == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SectorAbortUpgrade(p0, p1)
+}
+
+func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
 	if s.Internal.SectorAddPieceToAny == nil {
 		return *new(SectorOffset), ErrNotSupported
@@ -5010,6 +5142,17 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storifac
 	return *new(storiface.CallID), ErrNotSupported
 }
 
+func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	if s.Internal.FinalizeReplicaUpdate == nil {
+		return *new(storiface.CallID), ErrNotSupported
+	}
+	return s.Internal.FinalizeReplicaUpdate(p0, p1, p2)
+}
+
+func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
+	return *new(storiface.CallID), ErrNotSupported
+}
+
 func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
 	if s.Internal.FinalizeSector == nil {
 		return *new(storiface.CallID), ErrNotSupported
@@ -1769,6 +1769,49 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
 }
 
+// NetProtectAdd mocks base method.
+func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectAdd", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// NetProtectAdd indicates an expected call of NetProtectAdd.
+func (mr *MockFullNodeMockRecorder) NetProtectAdd(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectAdd", reflect.TypeOf((*MockFullNode)(nil).NetProtectAdd), arg0, arg1)
+}
+
+// NetProtectList mocks base method.
+func (m *MockFullNode) NetProtectList(arg0 context.Context) ([]peer.ID, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectList", arg0)
+	ret0, _ := ret[0].([]peer.ID)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// NetProtectList indicates an expected call of NetProtectList.
+func (mr *MockFullNodeMockRecorder) NetProtectList(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectList", reflect.TypeOf((*MockFullNode)(nil).NetProtectList), arg0)
+}
+
+// NetProtectRemove mocks base method.
+func (m *MockFullNode) NetProtectRemove(arg0 context.Context, arg1 []peer.ID) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "NetProtectRemove", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// NetProtectRemove indicates an expected call of NetProtectRemove.
+func (mr *MockFullNodeMockRecorder) NetProtectRemove(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetProtectRemove", reflect.TypeOf((*MockFullNode)(nil).NetProtectRemove), arg0, arg1)
+}
+
 // NetPubsubScores mocks base method.
 func (m *MockFullNode) NetPubsubScores(arg0 context.Context) ([]api.PubsubScore, error) {
 	m.ctrl.T.Helper()
@@ -337,4 +337,8 @@ func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder
 	finish(w.ClientExport(ctx, eref, *ref))
 }
 
+func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
+	return w.FullNode.PaychFund(ctx, from, to, amt)
+}
+
 var _ FullNode = &WrapperV1Full{}
@@ -57,7 +57,7 @@ var (
 	FullAPIVersion0 = newVer(1, 5, 0)
 	FullAPIVersion1 = newVer(2, 2, 0)
 
-	MinerAPIVersion0  = newVer(1, 3, 0)
+	MinerAPIVersion0  = newVer(1, 4, 0)
 	WorkerAPIVersion0 = newVer(1, 5, 0)
 )
 
blockstore/context.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package blockstore

import (
	"context"
)

type hotViewKey struct{}

var hotView = hotViewKey{}

// WithHotView constructs a new context with an option that provides a hint to the blockstore
// (e.g. the splitstore) that the object (and its ipld references) should be kept hot.
func WithHotView(ctx context.Context) context.Context {
	return context.WithValue(ctx, hotView, struct{}{})
}

// IsHotView returns true if the hot view option is set in the context
func IsHotView(ctx context.Context) bool {
	v := ctx.Value(hotView)
	return v != nil
}
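A short usage sketch for the hot-view hint (not part of this diff): the caller threads the option through a context, and a splitstore-aware blockstore consults it on reads. The example function is illustrative.

```go
package blockstore_test

import (
	"context"

	"github.com/filecoin-project/lotus/blockstore"
)

// ExampleWithHotView shows both sides of the hint: setting it on a context,
// and checking it inside a blockstore implementation.
func ExampleWithHotView() {
	ctx := blockstore.WithHotView(context.Background())

	// inside a splitstore-aware blockstore:
	if blockstore.IsHotView(ctx) {
		// keep the object (and its ipld references) in the hot tier
	}
}
```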
@@ -49,10 +49,11 @@ These are options in the `[Chainstore.Splitstore]` section of the configuration:
   blockstore and discards writes; this is necessary to support syncing from a snapshot.
 - `MarkSetType` -- specifies the type of markset to use during compaction.
   The markset is the data structure used by compaction/gc to track live objects.
-  The default value is `"map"`, which will use an in-memory map; if you are limited
-  in memory (or indeed see compaction run out of memory), you can also specify
-  `"badger"` which will use an disk backed markset, using badger. This will use
-  much less memory, but will also make compaction slower.
+  The default value is `"badger"`, which will use a disk backed markset using badger.
+  If you have a lot of memory (48G or more) you can also use `"map"`, which will use
+  an in memory markset, speeding up compaction at the cost of higher memory usage.
+  Note: If you are using a VPS with a network volume, you need to provision at least
+  3000 IOPs with the badger markset.
 - `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4
   finalities maintained by default, to maintain messages and message receipts in the
   hotstore. This is useful for assistive nodes that want to support syncing for other
@@ -105,6 +106,12 @@ Compaction works transactionally with the following algorithm:
 - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
 - We then end the transaction and compact/gc the hotstore.
 
+As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been
+modified to eliminate sorting and maintain the cold object set on disk. This drastically reduces
+memory usage; in fact, when using badger as the markset, compaction uses very little memory, and
+it should now be possible to run splitstore with 32GB of RAM or less without danger of running out of
+memory during compaction.
+
 ## Garbage Collection
 
 TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
blockstore/splitstore/checkpoint.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package splitstore

import (
	"bufio"
	"io"
	"os"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

type Checkpoint struct {
	file *os.File
	buf  *bufio.Writer
}

func NewCheckpoint(path string) (*Checkpoint, error) {
	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		return nil, xerrors.Errorf("error creating checkpoint: %w", err)
	}
	buf := bufio.NewWriter(file)

	return &Checkpoint{
		file: file,
		buf:  buf,
	}, nil
}

func OpenCheckpoint(path string) (*Checkpoint, cid.Cid, error) {
	filein, err := os.Open(path)
	if err != nil {
		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for reading: %w", err)
	}
	defer filein.Close() //nolint:errcheck

	bufin := bufio.NewReader(filein)
	start, err := readRawCid(bufin, nil)
	if err != nil && err != io.EOF {
		return nil, cid.Undef, xerrors.Errorf("error reading cid from checkpoint: %w", err)
	}

	fileout, err := os.OpenFile(path, os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for writing: %w", err)
	}
	bufout := bufio.NewWriter(fileout)

	return &Checkpoint{
		file: fileout,
		buf:  bufout,
	}, start, nil
}

func (cp *Checkpoint) Set(c cid.Cid) error {
	if _, err := cp.file.Seek(0, io.SeekStart); err != nil {
		return xerrors.Errorf("error seeking beginning of checkpoint: %w", err)
	}

	if err := writeRawCid(cp.buf, c, true); err != nil {
		return xerrors.Errorf("error writing cid to checkpoint: %w", err)
	}

	return nil
}

func (cp *Checkpoint) Close() error {
	if cp.file == nil {
		return nil
	}

	err := cp.file.Close()
	cp.file = nil
	cp.buf = nil

	return err
}

func readRawCid(buf *bufio.Reader, hbuf []byte) (cid.Cid, error) {
	sz, err := buf.ReadByte()
	if err != nil {
		return cid.Undef, err // don't wrap EOF as it is not an error here
	}

	if hbuf == nil {
		hbuf = make([]byte, int(sz))
	} else {
		hbuf = hbuf[:int(sz)]
	}

	if _, err := io.ReadFull(buf, hbuf); err != nil {
		return cid.Undef, xerrors.Errorf("error reading hash: %w", err) // wrap EOF, it's corrupt
	}

	hash, err := mh.Cast(hbuf)
	if err != nil {
		return cid.Undef, xerrors.Errorf("error casting multihash: %w", err)
	}

	return cid.NewCidV1(cid.Raw, hash), nil
}

func writeRawCid(buf *bufio.Writer, c cid.Cid, flush bool) error {
	hash := c.Hash()
	if err := buf.WriteByte(byte(len(hash))); err != nil {
		return err
	}
	if _, err := buf.Write(hash); err != nil {
		return err
	}
	if flush {
		return buf.Flush()
	}

	return nil
}
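A minimal sketch of the intended checkpoint lifecycle (not part of this diff; the test file below exercises it directly). The `purge` callback and function name are stand-ins for real compaction work; the point is that each `Set` durably records the last processed cid, so an interrupted pass can resume from the cid that `OpenCheckpoint` returns.

```go
package splitstore

import (
	cid "github.com/ipfs/go-cid"
)

// purgeWithCheckpoint processes cold cids and records progress after each one.
// After a crash, OpenCheckpoint(path) returns the last cid that was Set, and
// the caller can skip ahead to it before continuing.
func purgeWithCheckpoint(path string, cold []cid.Cid, purge func(cid.Cid) error) error {
	cp, err := NewCheckpoint(path)
	if err != nil {
		return err
	}
	defer cp.Close() //nolint:errcheck

	for _, c := range cold {
		if err := purge(c); err != nil {
			return err
		}
		// durable progress marker, rewritten in place at the start of the file
		if err := cp.Set(c); err != nil {
			return err
		}
	}
	return cp.Close()
}
```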
147
blockstore/splitstore/checkpoint_test.go
Normal file
147
blockstore/splitstore/checkpoint_test.go
Normal file
@ -0,0 +1,147 @@
package splitstore

import (
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "github.com/ipfs/go-cid"
    "github.com/multiformats/go-multihash"
)

func TestCheckpoint(t *testing.T) {
    dir, err := ioutil.TempDir("", "checkpoint.*")
    if err != nil {
        t.Fatal(err)
    }

    t.Cleanup(func() {
        _ = os.RemoveAll(dir)
    })

    path := filepath.Join(dir, "checkpoint")

    makeCid := func(key string) cid.Cid {
        h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
        if err != nil {
            t.Fatal(err)
        }

        return cid.NewCidV1(cid.Raw, h)
    }

    k1 := makeCid("a")
    k2 := makeCid("b")
    k3 := makeCid("c")
    k4 := makeCid("d")

    cp, err := NewCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }

    if err := cp.Set(k1); err != nil {
        t.Fatal(err)
    }
    if err := cp.Set(k2); err != nil {
        t.Fatal(err)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    cp, start, err := OpenCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }
    if !start.Equals(k2) {
        t.Fatalf("expected start to be %s; got %s", k2, start)
    }

    if err := cp.Set(k3); err != nil {
        t.Fatal(err)
    }
    if err := cp.Set(k4); err != nil {
        t.Fatal(err)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    cp, start, err = OpenCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }
    if !start.Equals(k4) {
        t.Fatalf("expected start to be %s; got %s", k4, start)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    // also test correct operation with an empty checkpoint
    cp, err = NewCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    cp, start, err = OpenCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }

    if start.Defined() {
        t.Fatal("expected start to be undefined")
    }

    if err := cp.Set(k1); err != nil {
        t.Fatal(err)
    }
    if err := cp.Set(k2); err != nil {
        t.Fatal(err)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    cp, start, err = OpenCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }
    if !start.Equals(k2) {
        t.Fatalf("expected start to be %s; got %s", k2, start)
    }

    if err := cp.Set(k3); err != nil {
        t.Fatal(err)
    }
    if err := cp.Set(k4); err != nil {
        t.Fatal(err)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

    cp, start, err = OpenCheckpoint(path)
    if err != nil {
        t.Fatal(err)
    }
    if !start.Equals(k4) {
        t.Fatalf("expected start to be %s; got %s", k4, start)
    }

    if err := cp.Close(); err != nil {
        t.Fatal(err)
    }

}
102 blockstore/splitstore/coldset.go Normal file
@ -0,0 +1,102 @@
package splitstore

import (
    "bufio"
    "io"
    "os"

    "golang.org/x/xerrors"

    cid "github.com/ipfs/go-cid"
)

type ColdSetWriter struct {
    file *os.File
    buf  *bufio.Writer
}

type ColdSetReader struct {
    file *os.File
    buf  *bufio.Reader
}

func NewColdSetWriter(path string) (*ColdSetWriter, error) {
    file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
    if err != nil {
        return nil, xerrors.Errorf("error creating coldset: %w", err)
    }
    buf := bufio.NewWriter(file)

    return &ColdSetWriter{
        file: file,
        buf:  buf,
    }, nil
}

func NewColdSetReader(path string) (*ColdSetReader, error) {
    file, err := os.Open(path)
    if err != nil {
        return nil, xerrors.Errorf("error opening coldset: %w", err)
    }
    buf := bufio.NewReader(file)

    return &ColdSetReader{
        file: file,
        buf:  buf,
    }, nil
}

func (s *ColdSetWriter) Write(c cid.Cid) error {
    return writeRawCid(s.buf, c, false)
}

func (s *ColdSetWriter) Close() error {
    if s.file == nil {
        return nil
    }

    err1 := s.buf.Flush()
    err2 := s.file.Close()
    s.buf = nil
    s.file = nil

    if err1 != nil {
        return err1
    }
    return err2
}

func (s *ColdSetReader) ForEach(f func(cid.Cid) error) error {
    hbuf := make([]byte, 256)
    for {
        next, err := readRawCid(s.buf, hbuf)
        if err != nil {
            if err == io.EOF {
                return nil
            }

            return xerrors.Errorf("error reading coldset: %w", err)
        }

        if err := f(next); err != nil {
            return err
        }
    }
}

func (s *ColdSetReader) Reset() error {
    _, err := s.file.Seek(0, io.SeekStart)
    return err
}

func (s *ColdSetReader) Close() error {
    if s.file == nil {
        return nil
    }

    err := s.file.Close()
    s.file = nil
    s.buf = nil

    return err
}
99 blockstore/splitstore/coldset_test.go Normal file
@ -0,0 +1,99 @@
package splitstore

import (
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "testing"

    "github.com/ipfs/go-cid"
    "github.com/multiformats/go-multihash"
)

func TestColdSet(t *testing.T) {
    dir, err := ioutil.TempDir("", "coldset.*")
    if err != nil {
        t.Fatal(err)
    }

    t.Cleanup(func() {
        _ = os.RemoveAll(dir)
    })

    path := filepath.Join(dir, "coldset")

    makeCid := func(i int) cid.Cid {
        h, err := multihash.Sum([]byte(fmt.Sprintf("cid.%d", i)), multihash.SHA2_256, -1)
        if err != nil {
            t.Fatal(err)
        }

        return cid.NewCidV1(cid.Raw, h)
    }

    const count = 1000
    cids := make([]cid.Cid, 0, count)
    for i := 0; i < count; i++ {
        cids = append(cids, makeCid(i))
    }

    cw, err := NewColdSetWriter(path)
    if err != nil {
        t.Fatal(err)
    }

    for _, c := range cids {
        if err := cw.Write(c); err != nil {
            t.Fatal(err)
        }
    }

    if err := cw.Close(); err != nil {
        t.Fatal(err)
    }

    cr, err := NewColdSetReader(path)
    if err != nil {
        t.Fatal(err)
    }

    index := 0
    err = cr.ForEach(func(c cid.Cid) error {
        if index >= count {
            t.Fatal("too many cids")
        }

        if !c.Equals(cids[index]) {
            t.Fatalf("wrong cid %d; expected %s but got %s", index, cids[index], c)
        }

        index++
        return nil
    })
    if err != nil {
        t.Fatal(err)
    }

    if err := cr.Reset(); err != nil {
        t.Fatal(err)
    }

    index = 0
    err = cr.ForEach(func(c cid.Cid) error {
        if index >= count {
            t.Fatal("too many cids")
        }

        if !c.Equals(cids[index]) {
            t.Fatalf("wrong cid; expected %s but got %s", cids[index], c)
        }

        index++
        return nil
    })
    if err != nil {
        t.Fatal(err)
    }

}
@ -14,15 +14,24 @@ var errMarkSetClosed = errors.New("markset closed")
type MarkSet interface {
    ObjectVisitor
    Mark(cid.Cid) error
    MarkMany([]cid.Cid) error
    Has(cid.Cid) (bool, error)
    Close() error

    // BeginCriticalSection ensures that the markset is persisted to disk for recovery in case
    // of abnormal termination during the critical section span.
    BeginCriticalSection() error
    // EndCriticalSection ends the critical section span.
    EndCriticalSection()
}

type MarkSetEnv interface {
    // Create creates a new markset within the environment.
    // name is a unique name for this markset, mapped to the filesystem in disk-backed environments
    // New creates a new markset within the environment.
    // name is a unique name for this markset, mapped to the filesystem for on-disk persistence.
    // sizeHint is a hint about the expected size of the markset
    Create(name string, sizeHint int64) (MarkSet, error)
    New(name string, sizeHint int64) (MarkSet, error)
    // Recover recovers an existing markset persisted on-disk.
    Recover(name string) (MarkSet, error)
    // Close closes the markset
    Close() error
}
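The reshaped interface encodes a lifecycle: a markset created with New is disposable until BeginCriticalSection, after which it must survive abnormal termination until EndCriticalSection, with Recover reopening the persisted state after a restart. A sketch of the intended call sequence, inferred from the tests further down; anything beyond what those tests exercise is an assumption:

func markSetLifecycle(env MarkSetEnv, k1, k2 cid.Cid) error {
    ms, err := env.New("live", 0) // sizeHint 0: no size estimate
    if err != nil {
        return err
    }

    if err := ms.Mark(k1); err != nil { // marks before the critical section may stay in memory
        return err
    }

    // from here on the markset must be recoverable after abnormal termination
    if err := ms.BeginCriticalSection(); err != nil {
        return err
    }
    if err := ms.Mark(k2); err != nil { // now persisted and synced
        return err
    }
    if err := ms.Close(); err != nil { // simulate interruption: Close without EndCriticalSection
        return err
    }

    // after a restart, Recover reloads the persisted marks
    ms, err = env.Recover("live")
    if err != nil {
        return err
    }
    ms.EndCriticalSection() // done; the persisted state can be discarded
    return ms.Close()       // after this, Recover("live") is expected to fail
}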
@ -30,7 +39,7 @@ type MarkSetEnv interface {
func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
    switch mtype {
    case "map":
        return NewMapMarkSetEnv()
        return NewMapMarkSetEnv(path)
    case "badger":
        return NewBadgerMarkSetEnv(path)
    default:
@ -3,6 +3,7 @@ package splitstore
import (
    "os"
    "path/filepath"
    "runtime"
    "sync"

    "golang.org/x/xerrors"
@ -28,6 +29,7 @@ type BadgerMarkSet struct {
    writers int
    seqno   int
    version int
    persist bool

    db   *badger.DB
    path string
@ -47,11 +49,10 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) {
    return &BadgerMarkSetEnv{path: msPath}, nil
}

func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
    name += ".tmp"
func (e *BadgerMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
    path := filepath.Join(e.path, name)

    db, err := openTransientBadgerDB(path)
    db, err := openBadgerDB(path, false)
    if err != nil {
        return nil, xerrors.Errorf("error creating badger db: %w", err)
    }
@ -67,8 +68,72 @@ func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error)
    return ms, nil
}

func (e *BadgerMarkSetEnv) Recover(name string) (MarkSet, error) {
    path := filepath.Join(e.path, name)

    if _, err := os.Stat(path); err != nil {
        return nil, xerrors.Errorf("error stating badger db path: %w", err)
    }

    db, err := openBadgerDB(path, true)
    if err != nil {
        return nil, xerrors.Errorf("error creating badger db: %w", err)
    }

    ms := &BadgerMarkSet{
        pend:    make(map[string]struct{}),
        writing: make(map[int]map[string]struct{}),
        db:      db,
        path:    path,
        persist: true,
    }
    ms.cond.L = &ms.mx

    return ms, nil
}

func (e *BadgerMarkSetEnv) Close() error {
    return os.RemoveAll(e.path)
    return nil
}

func (s *BadgerMarkSet) BeginCriticalSection() error {
    s.mx.Lock()

    if s.persist {
        s.mx.Unlock()
        return nil
    }

    var write bool
    var seqno int
    if len(s.pend) > 0 {
        write = true
        seqno = s.nextBatch()
    }

    s.persist = true
    s.mx.Unlock()

    if write {
        // all writes sync once persist is true
        return s.write(seqno)
    }

    // wait for any pending writes and sync
    s.mx.Lock()
    for s.writers > 0 {
        s.cond.Wait()
    }
    s.mx.Unlock()

    return s.db.Sync()
}

func (s *BadgerMarkSet) EndCriticalSection() {
    s.mx.Lock()
    defer s.mx.Unlock()

    s.persist = false
}

func (s *BadgerMarkSet) Mark(c cid.Cid) error {
@ -88,6 +153,23 @@ func (s *BadgerMarkSet) Mark(c cid.Cid) error {
    return nil
}

func (s *BadgerMarkSet) MarkMany(batch []cid.Cid) error {
    s.mx.Lock()
    if s.pend == nil {
        s.mx.Unlock()
        return errMarkSetClosed
    }

    write, seqno := s.putMany(batch)
    s.mx.Unlock()

    if write {
        return s.write(seqno)
    }

    return nil
}

func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) {
    s.mx.RLock()
    defer s.mx.RUnlock()
@ -193,16 +275,34 @@ func (s *BadgerMarkSet) tryDB(key []byte) (has bool, err error) {
// writer holds the exclusive lock
func (s *BadgerMarkSet) put(key string) (write bool, seqno int) {
    s.pend[key] = struct{}{}
    if len(s.pend) < badgerMarkSetBatchSize {
    if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
        return false, 0
    }

    seqno = s.seqno
    seqno = s.nextBatch()
    return true, seqno
}

func (s *BadgerMarkSet) putMany(batch []cid.Cid) (write bool, seqno int) {
    for _, c := range batch {
        key := string(c.Hash())
        s.pend[key] = struct{}{}
    }

    if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
        return false, 0
    }

    seqno = s.nextBatch()
    return true, seqno
}

func (s *BadgerMarkSet) nextBatch() int {
    seqno := s.seqno
    s.seqno++
    s.writing[seqno] = s.pend
    s.pend = make(map[string]struct{})

    return true, seqno
    return seqno
}

func (s *BadgerMarkSet) write(seqno int) (err error) {
@ -247,6 +347,14 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
        return xerrors.Errorf("error flushing batch to badger markset: %w", err)
    }

    s.mx.RLock()
    persist := s.persist
    s.mx.RUnlock()

    if persist {
        return s.db.Sync()
    }

    return nil
}

@ -266,26 +374,29 @@ func (s *BadgerMarkSet) Close() error {
    db := s.db
    s.db = nil

    return closeTransientBadgerDB(db, s.path)
    return closeBadgerDB(db, s.path, s.persist)
}

func (s *BadgerMarkSet) SetConcurrent() {}
func openBadgerDB(path string, recover bool) (*badger.DB, error) {
    // if it is not a recovery, clean up first
    if !recover {
        err := os.RemoveAll(path)
        if err != nil {
            return nil, xerrors.Errorf("error clearing markset directory: %w", err)
        }

func openTransientBadgerDB(path string) (*badger.DB, error) {
    // clean up first
    err := os.RemoveAll(path)
    if err != nil {
        return nil, xerrors.Errorf("error clearing markset directory: %w", err)
    }

    err = os.MkdirAll(path, 0755) //nolint:gosec
    if err != nil {
        return nil, xerrors.Errorf("error creating markset directory: %w", err)
        err = os.MkdirAll(path, 0755) //nolint:gosec
        if err != nil {
            return nil, xerrors.Errorf("error creating markset directory: %w", err)
        }
    }

    opts := badger.DefaultOptions(path)
    // we manually sync when we are in critical section
    opts.SyncWrites = false
    // no need to do that
    opts.CompactL0OnClose = false
    // we store hashes, not much to gain by compression
    opts.Compression = options.None
    // Note: We use FileIO for loading modes to avoid memory thrashing and interference
    // between the system blockstore and the markset.
@ -294,6 +405,15 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
    // exceeded 1GB in size.
    opts.TableLoadingMode = options.FileIO
    opts.ValueLogLoadingMode = options.FileIO
    // We increase the number of L0 tables before compaction to make it unlikely to
    // be necessary.
    opts.NumLevelZeroTables = 20      // default is 5
    opts.NumLevelZeroTablesStall = 30 // default is 10
    // increase the number of compactors from default 2 so that if we ever have to
    // compact, it is fast
    if runtime.NumCPU()/2 > opts.NumCompactors {
        opts.NumCompactors = runtime.NumCPU() / 2
    }
    opts.Logger = &badgerLogger{
        SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
        skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
@ -302,12 +422,16 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
    return badger.Open(opts)
}

func closeTransientBadgerDB(db *badger.DB, path string) error {
func closeBadgerDB(db *badger.DB, path string, persist bool) error {
    err := db.Close()
    if err != nil {
        return xerrors.Errorf("error closing badger markset: %w", err)
    }

    if persist {
        return nil
    }

    err = os.RemoveAll(path)
    if err != nil {
        return xerrors.Errorf("error deleting badger markset: %w", err)
@ -1,37 +1,104 @@
package splitstore

import (
    "bufio"
    "io"
    "os"
    "path/filepath"
    "sync"

    "golang.org/x/xerrors"

    cid "github.com/ipfs/go-cid"
)

type MapMarkSetEnv struct{}
type MapMarkSetEnv struct {
    path string
}

var _ MarkSetEnv = (*MapMarkSetEnv)(nil)

type MapMarkSet struct {
    mx  sync.RWMutex
    set map[string]struct{}

    persist bool
    file    *os.File
    buf     *bufio.Writer

    path string
}

var _ MarkSet = (*MapMarkSet)(nil)

func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
    return &MapMarkSetEnv{}, nil
func NewMapMarkSetEnv(path string) (*MapMarkSetEnv, error) {
    msPath := filepath.Join(path, "markset.map")
    err := os.MkdirAll(msPath, 0755) //nolint:gosec
    if err != nil {
        return nil, xerrors.Errorf("error creating markset directory: %w", err)
    }

    return &MapMarkSetEnv{path: msPath}, nil
}

func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
func (e *MapMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
    path := filepath.Join(e.path, name)
    return &MapMarkSet{
        set: make(map[string]struct{}, sizeHint),
        set:  make(map[string]struct{}, sizeHint),
        path: path,
    }, nil
}

func (e *MapMarkSetEnv) Recover(name string) (MarkSet, error) {
    path := filepath.Join(e.path, name)
    s := &MapMarkSet{
        set:  make(map[string]struct{}),
        path: path,
    }

    in, err := os.Open(path)
    if err != nil {
        return nil, xerrors.Errorf("error opening markset file for read: %w", err)
    }
    defer in.Close() //nolint:errcheck

    // wrap a buffered reader to make this faster
    buf := bufio.NewReader(in)
    for {
        var sz byte
        if sz, err = buf.ReadByte(); err != nil {
            break
        }

        key := make([]byte, int(sz))
        if _, err = io.ReadFull(buf, key); err != nil {
            break
        }

        s.set[string(key)] = struct{}{}
    }

    if err != io.EOF {
        return nil, xerrors.Errorf("error reading markset file: %w", err)
    }

    file, err := os.OpenFile(s.path, os.O_WRONLY|os.O_APPEND, 0)
    if err != nil {
        return nil, xerrors.Errorf("error opening markset file for write: %w", err)
    }

    s.persist = true
    s.file = file
    s.buf = bufio.NewWriter(file)

    return s, nil
}

func (e *MapMarkSetEnv) Close() error {
    return nil
}

func (s *MapMarkSet) Mark(cid cid.Cid) error {
func (s *MapMarkSet) BeginCriticalSection() error {
    s.mx.Lock()
    defer s.mx.Unlock()

@ -39,7 +106,104 @@ func (s *MapMarkSet) Mark(cid cid.Cid) error {
        return errMarkSetClosed
    }

    s.set[string(cid.Hash())] = struct{}{}
    if s.persist {
        return nil
    }

    file, err := os.OpenFile(s.path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
    if err != nil {
        return xerrors.Errorf("error opening markset file: %w", err)
    }

    // wrap a buffered writer to make this faster
    s.buf = bufio.NewWriter(file)
    for key := range s.set {
        if err := s.writeKey([]byte(key), false); err != nil {
            _ = file.Close()
            s.buf = nil
            return err
        }
    }
    if err := s.buf.Flush(); err != nil {
        _ = file.Close()
        s.buf = nil
        return xerrors.Errorf("error flushing markset file buffer: %w", err)
    }

    s.file = file
    s.persist = true

    return nil
}

func (s *MapMarkSet) EndCriticalSection() {
    s.mx.Lock()
    defer s.mx.Unlock()

    if !s.persist {
        return
    }

    _ = s.file.Close()
    _ = os.Remove(s.path)
    s.file = nil
    s.buf = nil
    s.persist = false
}

func (s *MapMarkSet) Mark(c cid.Cid) error {
    s.mx.Lock()
    defer s.mx.Unlock()

    if s.set == nil {
        return errMarkSetClosed
    }

    hash := c.Hash()
    s.set[string(hash)] = struct{}{}

    if s.persist {
        if err := s.writeKey(hash, true); err != nil {
            return err
        }

        if err := s.file.Sync(); err != nil {
            return xerrors.Errorf("error syncing markset: %w", err)
        }
    }

    return nil
}

func (s *MapMarkSet) MarkMany(batch []cid.Cid) error {
    s.mx.Lock()
    defer s.mx.Unlock()

    if s.set == nil {
        return errMarkSetClosed
    }

    for _, c := range batch {
        hash := c.Hash()
        s.set[string(hash)] = struct{}{}

        if s.persist {
            if err := s.writeKey(hash, false); err != nil {
                return err
            }
        }
    }

    if s.persist {
        if err := s.buf.Flush(); err != nil {
            return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
        }

        if err := s.file.Sync(); err != nil {
            return xerrors.Errorf("error syncing markset: %w", err)
        }
    }

    return nil
}

@ -63,12 +227,23 @@ func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
        return false, errMarkSetClosed
    }

    key := string(c.Hash())
    hash := c.Hash()
    key := string(hash)
    if _, ok := s.set[key]; ok {
        return false, nil
    }

    s.set[key] = struct{}{}

    if s.persist {
        if err := s.writeKey(hash, true); err != nil {
            return false, err
        }
        if err := s.file.Sync(); err != nil {
            return false, xerrors.Errorf("error syncing markset: %w", err)
        }
    }

    return true, nil
}

@ -76,6 +251,39 @@ func (s *MapMarkSet) Close() error {
    s.mx.Lock()
    defer s.mx.Unlock()

    if s.set == nil {
        return nil
    }

    s.set = nil

    if s.file != nil {
        if err := s.file.Close(); err != nil {
            log.Warnf("error closing markset file: %s", err)
        }

        if !s.persist {
            if err := os.Remove(s.path); err != nil {
                log.Warnf("error removing markset file: %s", err)
            }
        }
    }

    return nil
}

func (s *MapMarkSet) writeKey(k []byte, flush bool) error {
    if err := s.buf.WriteByte(byte(len(k))); err != nil {
        return xerrors.Errorf("error writing markset key length to disk: %w", err)
    }
    if _, err := s.buf.Write(k); err != nil {
        return xerrors.Errorf("error writing markset key to disk: %w", err)
    }
    if flush {
        if err := s.buf.Flush(); err != nil {
            return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
        }
    }

    return nil
}
@ -11,7 +11,10 @@ import (

func TestMapMarkSet(t *testing.T) {
    testMarkSet(t, "map")
    testMarkSetRecovery(t, "map")
    testMarkSetMarkMany(t, "map")
    testMarkSetVisitor(t, "map")
    testMarkSetVisitorRecovery(t, "map")
}

func TestBadgerMarkSet(t *testing.T) {
@ -21,12 +24,13 @@ func TestBadgerMarkSet(t *testing.T) {
        badgerMarkSetBatchSize = bs
    })
    testMarkSet(t, "badger")
    testMarkSetRecovery(t, "badger")
    testMarkSetMarkMany(t, "badger")
    testMarkSetVisitor(t, "badger")
    testMarkSetVisitorRecovery(t, "badger")
}

func testMarkSet(t *testing.T, lsType string) {
    t.Helper()

    path, err := ioutil.TempDir("", "markset.*")
    if err != nil {
        t.Fatal(err)
@ -42,12 +46,12 @@ func testMarkSet(t *testing.T, lsType string) {
    }
    defer env.Close() //nolint:errcheck

    hotSet, err := env.Create("hot", 0)
    hotSet, err := env.New("hot", 0)
    if err != nil {
        t.Fatal(err)
    }

    coldSet, err := env.Create("cold", 0)
    coldSet, err := env.New("cold", 0)
    if err != nil {
        t.Fatal(err)
    }
@ -62,6 +66,7 @@ func testMarkSet(t *testing.T, lsType string) {
    }

    mustHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
@ -73,6 +78,7 @@ func testMarkSet(t *testing.T, lsType string) {
    }

    mustNotHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
@ -114,12 +120,12 @@ func testMarkSet(t *testing.T, lsType string) {
        t.Fatal(err)
    }

    hotSet, err = env.Create("hot", 0)
    hotSet, err = env.New("hot", 0)
    if err != nil {
        t.Fatal(err)
    }

    coldSet, err = env.Create("cold", 0)
    coldSet, err = env.New("cold", 0)
    if err != nil {
        t.Fatal(err)
    }
@ -150,8 +156,6 @@ func testMarkSet(t *testing.T, lsType string) {
}

func testMarkSetVisitor(t *testing.T, lsType string) {
    t.Helper()

    path, err := ioutil.TempDir("", "markset.*")
    if err != nil {
        t.Fatal(err)
@ -167,7 +171,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
    }
    defer env.Close() //nolint:errcheck

    visitor, err := env.Create("test", 0)
    visitor, err := env.New("test", 0)
    if err != nil {
        t.Fatal(err)
    }
@ -219,3 +223,322 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
    mustNotVisit(visitor, k3)
    mustNotVisit(visitor, k4)
}

func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
    path, err := ioutil.TempDir("", "markset.*")
    if err != nil {
        t.Fatal(err)
    }

    t.Cleanup(func() {
        _ = os.RemoveAll(path)
    })

    env, err := OpenMarkSetEnv(path, lsType)
    if err != nil {
        t.Fatal(err)
    }
    defer env.Close() //nolint:errcheck

    visitor, err := env.New("test", 0)
    if err != nil {
        t.Fatal(err)
    }
    defer visitor.Close() //nolint:errcheck

    makeCid := func(key string) cid.Cid {
        h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
        if err != nil {
            t.Fatal(err)
        }

        return cid.NewCidV1(cid.Raw, h)
    }

    mustVisit := func(v ObjectVisitor, cid cid.Cid) {
        visit, err := v.Visit(cid)
        if err != nil {
            t.Fatal(err)
        }

        if !visit {
            t.Fatal("object should be visited")
        }
    }

    mustNotVisit := func(v ObjectVisitor, cid cid.Cid) {
        visit, err := v.Visit(cid)
        if err != nil {
            t.Fatal(err)
        }

        if visit {
            t.Fatal("unexpected visit")
        }
    }

    k1 := makeCid("a")
    k2 := makeCid("b")
    k3 := makeCid("c")
    k4 := makeCid("d")

    mustVisit(visitor, k1)
    mustVisit(visitor, k2)

    if err := visitor.BeginCriticalSection(); err != nil {
        t.Fatal(err)
    }

    mustVisit(visitor, k3)
    mustVisit(visitor, k4)

    mustNotVisit(visitor, k1)
    mustNotVisit(visitor, k2)
    mustNotVisit(visitor, k3)
    mustNotVisit(visitor, k4)

    if err := visitor.Close(); err != nil {
        t.Fatal(err)
    }

    visitor, err = env.Recover("test")
    if err != nil {
        t.Fatal(err)
    }

    mustNotVisit(visitor, k1)
    mustNotVisit(visitor, k2)
    mustNotVisit(visitor, k3)
    mustNotVisit(visitor, k4)

    visitor.EndCriticalSection()

    if err := visitor.Close(); err != nil {
        t.Fatal(err)
    }

    _, err = env.Recover("test")
    if err == nil {
        t.Fatal("expected recovery to fail")
    }
}

func testMarkSetRecovery(t *testing.T, lsType string) {
    path, err := ioutil.TempDir("", "markset.*")
    if err != nil {
        t.Fatal(err)
    }

    t.Cleanup(func() {
        _ = os.RemoveAll(path)
    })

    env, err := OpenMarkSetEnv(path, lsType)
    if err != nil {
        t.Fatal(err)
    }
    defer env.Close() //nolint:errcheck

    markSet, err := env.New("test", 0)
    if err != nil {
        t.Fatal(err)
    }

    makeCid := func(key string) cid.Cid {
        h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
        if err != nil {
            t.Fatal(err)
        }

        return cid.NewCidV1(cid.Raw, h)
    }

    mustHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
        }

        if !has {
            t.Fatal("mark not found")
        }
    }

    mustNotHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
        }

        if has {
            t.Fatal("unexpected mark")
        }
    }

    k1 := makeCid("a")
    k2 := makeCid("b")
    k3 := makeCid("c")
    k4 := makeCid("d")

    if err := markSet.Mark(k1); err != nil {
        t.Fatal(err)
    }
    if err := markSet.Mark(k2); err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustNotHave(markSet, k3)
    mustNotHave(markSet, k4)

    if err := markSet.BeginCriticalSection(); err != nil {
        t.Fatal(err)
    }

    if err := markSet.Mark(k3); err != nil {
        t.Fatal(err)
    }
    if err := markSet.Mark(k4); err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustHave(markSet, k3)
    mustHave(markSet, k4)

    if err := markSet.Close(); err != nil {
        t.Fatal(err)
    }

    markSet, err = env.Recover("test")
    if err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustHave(markSet, k3)
    mustHave(markSet, k4)

    markSet.EndCriticalSection()

    if err := markSet.Close(); err != nil {
        t.Fatal(err)
    }

    _, err = env.Recover("test")
    if err == nil {
        t.Fatal("expected recovery to fail")
    }
}

func testMarkSetMarkMany(t *testing.T, lsType string) {
    path, err := ioutil.TempDir("", "markset.*")
    if err != nil {
        t.Fatal(err)
    }

    t.Cleanup(func() {
        _ = os.RemoveAll(path)
    })

    env, err := OpenMarkSetEnv(path, lsType)
    if err != nil {
        t.Fatal(err)
    }
    defer env.Close() //nolint:errcheck

    markSet, err := env.New("test", 0)
    if err != nil {
        t.Fatal(err)
    }

    makeCid := func(key string) cid.Cid {
        h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
        if err != nil {
            t.Fatal(err)
        }

        return cid.NewCidV1(cid.Raw, h)
    }

    mustHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
        }

        if !has {
            t.Fatal("mark not found")
        }
    }

    mustNotHave := func(s MarkSet, cid cid.Cid) {
        t.Helper()
        has, err := s.Has(cid)
        if err != nil {
            t.Fatal(err)
        }

        if has {
            t.Fatal("unexpected mark")
        }
    }

    k1 := makeCid("a")
    k2 := makeCid("b")
    k3 := makeCid("c")
    k4 := makeCid("d")

    if err := markSet.MarkMany([]cid.Cid{k1, k2}); err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustNotHave(markSet, k3)
    mustNotHave(markSet, k4)

    if err := markSet.BeginCriticalSection(); err != nil {
        t.Fatal(err)
    }

    if err := markSet.MarkMany([]cid.Cid{k3, k4}); err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustHave(markSet, k3)
    mustHave(markSet, k4)

    if err := markSet.Close(); err != nil {
        t.Fatal(err)
    }

    markSet, err = env.Recover("test")
    if err != nil {
        t.Fatal(err)
    }

    mustHave(markSet, k1)
    mustHave(markSet, k2)
    mustHave(markSet, k3)
    mustHave(markSet, k4)

    markSet.EndCriticalSection()

    if err := markSet.Close(); err != nil {
        t.Fatal(err)
    }

    _, err = env.Recover("test")
    if err == nil {
        t.Fatal("expected recovery to fail")
    }
}

@ -129,8 +129,6 @@ type SplitStore struct {

    headChangeMx sync.Mutex

    coldPurgeSize int

    chain ChainAccessor
    ds    dstore.Datastore
    cold  bstore.Blockstore
@ -158,6 +156,17 @@ type SplitStore struct {
    txnRefsMx  sync.Mutex
    txnRefs    map[cid.Cid]struct{}
    txnMissing map[cid.Cid]struct{}
    txnMarkSet MarkSet
    txnSyncMx   sync.Mutex
    txnSyncCond sync.Cond
    txnSync     bool

    // background cold object reification
    reifyWorkers    sync.WaitGroup
    reifyMx         sync.Mutex
    reifyCond       sync.Cond
    reifyPend       map[cid.Cid]struct{}
    reifyInProgress map[cid.Cid]struct{}

    // registered protectors
    protectors []func(func(cid.Cid) error) error
@ -194,13 +203,16 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
        cold:       cold,
        hot:        hots,
        markSetEnv: markSetEnv,

        coldPurgeSize: defaultColdPurgeSize,
    }

    ss.txnViewsCond.L = &ss.txnViewsMx
    ss.txnSyncCond.L = &ss.txnSyncMx
    ss.ctx, ss.cancel = context.WithCancel(context.Background())

    ss.reifyCond.L = &ss.reifyMx
    ss.reifyPend = make(map[cid.Cid]struct{})
    ss.reifyInProgress = make(map[cid.Cid]struct{})

    if enableDebugLog {
        ss.debug, err = openDebugLog(path)
        if err != nil {
@ -208,6 +220,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
        }
    }

    if ss.checkpointExists() {
        log.Info("found compaction checkpoint; resuming compaction")
        if err := ss.completeCompaction(); err != nil {
            markSetEnv.Close() //nolint:errcheck
            return nil, xerrors.Errorf("error resuming compaction: %w", err)
        }
    }

    return ss, nil
}
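Open now resumes an interrupted compaction whenever a checkpoint file is present. checkpointExists and the path helpers it relies on are not part of this hunk; a plausible minimal form, assuming the store records its on-disk directory in a path field:

func (s *SplitStore) checkpointPath() string {
    return filepath.Join(s.path, "checkpoint") // assumption: s.path is the splitstore directory
}

func (s *SplitStore) coldSetPath() string {
    return filepath.Join(s.path, "coldset")
}

func (s *SplitStore) checkpointExists() bool {
    // a checkpoint file is only present if a compaction was interrupted
    // inside the critical section
    _, err := os.Stat(s.checkpointPath())
    return err == nil
}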
@ -230,6 +250,20 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
    s.txnLk.RLock()
    defer s.txnLk.RUnlock()

    // critical section
    if s.txnMarkSet != nil {
        has, err := s.txnMarkSet.Has(cid)
        if err != nil {
            return false, err
        }

        if has {
            return s.has(cid)
        }

        return s.cold.Has(ctx, cid)
    }

    has, err := s.hot.Has(ctx, cid)

    if err != nil {
@ -241,7 +275,13 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
        return true, nil
    }

    return s.cold.Has(ctx, cid)
    has, err = s.cold.Has(ctx, cid)
    if has && bstore.IsHotView(ctx) {
        s.reifyColdObject(cid)
    }

    return has, err

}

func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
@ -257,6 +297,20 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
    s.txnLk.RLock()
    defer s.txnLk.RUnlock()

    // critical section
    if s.txnMarkSet != nil {
        has, err := s.txnMarkSet.Has(cid)
        if err != nil {
            return nil, err
        }

        if has {
            return s.get(cid)
        }

        return s.cold.Get(ctx, cid)
    }

    blk, err := s.hot.Get(ctx, cid)

    switch err {
@ -271,8 +325,11 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)

        blk, err = s.cold.Get(ctx, cid)
        if err == nil {
            stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
            if bstore.IsHotView(ctx) {
                s.reifyColdObject(cid)
            }

            stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
        }
        return blk, err

@ -294,6 +351,20 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
    s.txnLk.RLock()
    defer s.txnLk.RUnlock()

    // critical section
    if s.txnMarkSet != nil {
        has, err := s.txnMarkSet.Has(cid)
        if err != nil {
            return 0, err
        }

        if has {
            return s.getSize(cid)
        }

        return s.cold.GetSize(ctx, cid)
    }

    size, err := s.hot.GetSize(ctx, cid)

    switch err {
@ -308,6 +379,10 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {

        size, err = s.cold.GetSize(ctx, cid)
        if err == nil {
            if bstore.IsHotView(ctx) {
                s.reifyColdObject(cid)
            }

            stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
        }
        return size, err
@ -332,6 +407,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {

    s.debug.LogWrite(blk)

    // critical section
    if s.txnMarkSet != nil {
        s.markLiveRefs([]cid.Cid{blk.Cid()})
        return nil
    }

    s.trackTxnRef(blk.Cid())
    return nil
}
@ -377,6 +458,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {

    s.debug.LogWriteMany(blks)

    // critical section
    if s.txnMarkSet != nil {
        s.markLiveRefs(batch)
        return nil
    }

    s.trackTxnRefMany(batch)
    return nil
}
@ -436,6 +523,23 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
        return cb(data)
    }

    // critical section
    s.txnLk.RLock() // the lock is released in protectView if we are not in critical section
    if s.txnMarkSet != nil {
        has, err := s.txnMarkSet.Has(cid)
        s.txnLk.RUnlock()

        if err != nil {
            return err
        }

        if has {
            return s.view(cid, cb)
        }

        return s.cold.View(ctx, cid, cb)
    }

    // views are (optimistically) protected two-fold:
    // - if there is an active transaction, then the reference is protected.
    // - if there is no active transaction, active views are tracked in a
@ -456,6 +560,10 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro

    err = s.cold.View(ctx, cid, cb)
    if err == nil {
        if bstore.IsHotView(ctx) {
            s.reifyColdObject(cid)
        }

        stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
    }
    return err
@ -565,6 +673,9 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
        }
    }

    // spawn the reifier
    go s.reifyOrchestrator()

    // watch the chain
    chain.SubscribeHeadChanges(s.HeadChange)

@ -585,12 +696,19 @@ func (s *SplitStore) Close() error {
    }

    if atomic.LoadInt32(&s.compacting) == 1 {
        s.txnSyncMx.Lock()
        s.txnSync = true
        s.txnSyncCond.Broadcast()
        s.txnSyncMx.Unlock()

        log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
        for atomic.LoadInt32(&s.compacting) == 1 {
            time.Sleep(time.Second)
        }
    }

    s.reifyCond.Broadcast()
    s.reifyWorkers.Wait()
    s.cancel()
    return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
}
@ -89,7 +89,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
    coldCnt := new(int64)
    missingCnt := new(int64)

    visitor, err := s.markSetEnv.Create("check", 0)
    visitor, err := s.markSetEnv.New("check", 0)
    if err != nil {
        return xerrors.Errorf("error creating visitor: %w", err)
    }
@ -3,8 +3,9 @@ package splitstore
import (
    "bytes"
    "errors"
    "os"
    "path/filepath"
    "runtime"
    "sort"
    "sync"
    "sync/atomic"
    "time"
@ -48,6 +49,10 @@ var (
    // SyncGapTime is the time delay from a tipset's min timestamp before we decide
    // there is a sync gap
    SyncGapTime = time.Minute

    // SyncWaitTime is the time delay from a tipset's min timestamp before we decide
    // we have synced.
    SyncWaitTime = 30 * time.Second
)

var (
@ -57,8 +62,6 @@ var (

const (
    batchSize = 16384

    defaultColdPurgeSize = 7_000_000
)

func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@ -141,9 +144,9 @@ func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool {
// transactionally protect incoming tipsets
func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
    s.txnLk.RLock()
    defer s.txnLk.RUnlock()

    if !s.txnActive {
        s.txnLk.RUnlock()
        return
    }

@ -152,12 +155,115 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
        cids = append(cids, ts.Cids()...)
    }

    if len(cids) == 0 {
        s.txnLk.RUnlock()
        return
    }

    // critical section
    if s.txnMarkSet != nil {
        curTs := apply[len(apply)-1]
        timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
        doSync := time.Since(timestamp) < SyncWaitTime
        go func() {
            if doSync {
                defer func() {
                    s.txnSyncMx.Lock()
                    defer s.txnSyncMx.Unlock()
                    s.txnSync = true
                    s.txnSyncCond.Broadcast()
                }()
            }
            defer s.txnLk.RUnlock()
            s.markLiveRefs(cids)

        }()
        return
    }

    s.trackTxnRefMany(cids)
    s.txnLk.RUnlock()
}

func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
    log.Debugf("marking %d live refs", len(cids))
    startMark := time.Now()

    count := new(int32)
    visitor := newConcurrentVisitor()
    walkObject := func(c cid.Cid) error {
        return s.walkObjectIncomplete(c, visitor,
            func(c cid.Cid) error {
                if isUnitaryObject(c) {
                    return errStopWalk
                }

                visit, err := s.txnMarkSet.Visit(c)
                if err != nil {
                    return xerrors.Errorf("error visiting object: %w", err)
                }

                if !visit {
                    return errStopWalk
                }

                atomic.AddInt32(count, 1)
                return nil
            },
            func(missing cid.Cid) error {
                log.Warnf("missing object reference %s in %s", missing, c)
                return errStopWalk
            })
    }

    // optimize the common case of single put
    if len(cids) == 1 {
        if err := walkObject(cids[0]); err != nil {
            log.Errorf("error marking tipset refs: %s", err)
        }
        log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
        return
    }

    workch := make(chan cid.Cid, len(cids))
    for _, c := range cids {
        workch <- c
    }
    close(workch)

    worker := func() error {
        for c := range workch {
            if err := walkObject(c); err != nil {
                return err
            }
        }

        return nil
    }

    workers := runtime.NumCPU() / 2
    if workers < 2 {
        workers = 2
    }
    if workers > len(cids) {
        workers = len(cids)
    }

    g := new(errgroup.Group)
    for i := 0; i < workers; i++ {
        g.Go(worker)
    }

    if err := g.Wait(); err != nil {
        log.Errorf("error marking tipset refs: %s", err)
    }

    log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
}

// transactionally protect a view
func (s *SplitStore) protectView(c cid.Cid) {
    s.txnLk.RLock()
    // the txnLk is held for read
    defer s.txnLk.RUnlock()

    if s.txnActive {
@ -387,6 +493,12 @@ func (s *SplitStore) compact(curTs *types.TipSet) {
|
||||
}
|
||||
|
||||
func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
if s.checkpointExists() {
|
||||
// this really shouldn't happen, but if it somehow does, it means that the hotstore
|
||||
// might be potentially inconsistent; abort compaction and notify the user to intervene.
|
||||
return xerrors.Errorf("checkpoint exists; aborting compaction")
|
||||
}
|
||||
|
||||
currentEpoch := curTs.Height()
|
||||
boundaryEpoch := currentEpoch - CompactionBoundary
|
||||
|
||||
@ -398,7 +510,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
|
||||
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
|
||||
|
||||
markSet, err := s.markSetEnv.Create("live", s.markSetSize)
|
||||
markSet, err := s.markSetEnv.New("live", s.markSetSize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating mark set: %w", err)
|
||||
}
|
||||
@ -409,9 +521,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// we are ready for concurrent marking
|
||||
s.beginTxnMarking(markSet)
|
||||
|
||||
// 0. track all protected references at beginning of compaction; anything added later should
|
||||
// be transactionally protected by the write
|
||||
log.Info("protecting references with registered protectors")
|
||||
@ -425,7 +534,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
log.Info("marking reachable objects")
|
||||
startMark := time.Now()
|
||||
|
||||
var count int64
|
||||
count := new(int64)
|
||||
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
@ -441,7 +550,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
count++
|
||||
atomic.AddInt64(count, 1)
|
||||
return nil
|
||||
})
|
||||
|
||||
@ -449,9 +558,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return xerrors.Errorf("error marking: %w", err)
|
||||
}
|
||||
|
||||
s.markSetSize = count + count>>2 // overestimate a bit
|
||||
s.markSetSize = *count + *count>>2 // overestimate a bit
|
||||
|
||||
log.Infow("marking done", "took", time.Since(startMark), "marked", count)
|
||||
log.Infow("marking done", "took", time.Since(startMark), "marked", *count)
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
@ -471,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
log.Info("collecting cold objects")
|
||||
startCollect := time.Now()
|
||||
|
||||
coldw, err := NewColdSetWriter(s.coldSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating coldset: %w", err)
|
||||
}
|
||||
defer coldw.Close() //nolint:errcheck
|
||||
|
||||
// some stats for logging
|
||||
var hotCnt, coldCnt int
|
||||
|
||||
cold := make([]cid.Cid, 0, s.coldPurgeSize)
|
||||
err = s.hot.ForEachKey(func(c cid.Cid) error {
|
||||
// was it marked?
|
||||
mark, err := markSet.Has(c)
|
||||
@ -488,7 +602,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
}
|
||||
|
||||
// it's cold, mark it as candidate for move
|
||||
cold = append(cold, c)
|
||||
if err := coldw.Write(c); err != nil {
|
||||
return xerrors.Errorf("error writing cid to coldstore: %w", err)
|
||||
}
|
||||
coldCnt++
|
||||
|
||||
return nil
|
||||
@ -498,12 +614,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return xerrors.Errorf("error collecting cold objects: %w", err)
|
||||
}
|
||||
|
||||
log.Infow("cold collection done", "took", time.Since(startCollect))
|
||||
|
||||
if coldCnt > 0 {
|
||||
s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit
|
||||
if err := coldw.Close(); err != nil {
|
||||
return xerrors.Errorf("error closing coldset: %w", err)
|
||||
}
|
||||
|
||||
log.Infow("cold collection done", "took", time.Since(startCollect))
|
||||
|
||||
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
|
||||
@ -521,11 +637,17 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return err
|
||||
}
|
||||
|
||||
coldr, err := NewColdSetReader(s.coldSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening coldset: %w", err)
|
||||
}
|
||||
defer coldr.Close() //nolint:errcheck
|
||||
|
||||
// 3. copy the cold objects to the coldstore -- if we have one
|
||||
if !s.cfg.DiscardColdBlocks {
|
||||
log.Info("moving cold objects to the coldstore")
|
||||
startMove := time.Now()
|
||||
err = s.moveColdBlocks(cold)
|
||||
err = s.moveColdBlocks(coldr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error moving cold objects: %w", err)
|
||||
}
|
||||
@ -534,41 +656,64 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := coldr.Reset(); err != nil {
|
||||
return xerrors.Errorf("error resetting coldset: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// 4. sort cold objects so that the dags with most references are deleted first
|
||||
// this ensures that we can't refer to a dag with its consituents already deleted, ie
|
||||
// we lave no dangling references.
|
||||
log.Info("sorting cold objects")
|
||||
startSort := time.Now()
|
||||
err = s.sortObjects(cold)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error sorting objects: %w", err)
|
||||
}
|
||||
log.Infow("sorting done", "took", time.Since(startSort))
|
||||
|
||||
// 4.1 protect transactional refs once more
|
||||
// strictly speaking, this is not necessary as purge will do it before deleting each
|
||||
// batch. however, there is likely a largish number of references accumulated during
|
||||
// ths sort and this protects before entering pruge context.
|
||||
err = s.protectTxnRefs(markSet)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error protecting transactional refs: %w", err)
|
||||
// 4. Purge cold objects with checkpointing for recovery.
|
||||
// This is the critical section of compaction, whereby any cold object not in the markSet is
|
||||
// considered already deleted.
|
||||
// We delete cold objects in batches, holding the transaction lock, where we check the markSet
|
||||
// again for new references created by the VM.
|
||||
// After each batch, we write a checkpoint to disk; if the process is interrupted before completion,
|
||||
// the process will continue from the checkpoint in the next recovery.
|
||||
if err := s.beginCriticalSection(markSet); err != nil {
|
||||
return xerrors.Errorf("error beginning critical section: %w", err)
|
||||
}
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// wait for the head to catch up so that the current tipset is marked
|
||||
s.waitForSync()
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
checkpoint, err := NewCheckpoint(s.checkpointPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating checkpoint: %w", err)
|
||||
}
|
||||
defer checkpoint.Close() //nolint:errcheck
|
||||
|
||||
// 5. purge cold objects from the hotstore, taking protected references into account
|
||||
log.Info("purging cold objects from the hotstore")
|
||||
startPurge := time.Now()
|
||||
err = s.purge(cold, markSet)
|
||||
err = s.purge(coldr, checkpoint, markSet)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error purging cold blocks: %w", err)
|
||||
return xerrors.Errorf("error purging cold objects: %w", err)
|
||||
}
|
||||
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
|
||||
|
||||
s.endCriticalSection()
|
||||
|
||||
if err := checkpoint.Close(); err != nil {
|
||||
log.Warnf("error closing checkpoint: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.checkpointPath()); err != nil {
|
||||
log.Warnf("error removing checkpoint: %s", err)
|
||||
}
|
||||
if err := coldr.Close(); err != nil {
|
||||
log.Warnf("error closing coldset: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.coldSetPath()); err != nil {
|
||||
log.Warnf("error removing coldset: %s", err)
|
||||
}
|
||||
|
||||
// we are done; do some housekeeping
|
||||
s.endTxnProtect()
|
||||
s.gcHotstore()
|
||||
@ -599,12 +744,51 @@ func (s *SplitStore) beginTxnProtect() {
|
||||
defer s.txnLk.Unlock()
|
||||
|
||||
s.txnActive = true
|
||||
s.txnSync = false
|
||||
s.txnRefs = make(map[cid.Cid]struct{})
|
||||
s.txnMissing = make(map[cid.Cid]struct{})
|
||||
}
|
||||
|
||||
func (s *SplitStore) beginTxnMarking(markSet MarkSet) {
|
||||
log.Info("beginning transactional marking")
|
||||
func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {
|
||||
log.Info("beginning critical section")
|
||||
|
||||
// do that once first to get the bulk before the markset is in critical section
|
||||
if err := s.protectTxnRefs(markSet); err != nil {
|
||||
return xerrors.Errorf("error protecting transactional references: %w", err)
|
||||
}
|
||||
|
||||
if err := markSet.BeginCriticalSection(); err != nil {
|
||||
return xerrors.Errorf("error beginning critical section for markset: %w", err)
|
||||
}
|
||||
|
||||
s.txnLk.Lock()
|
||||
defer s.txnLk.Unlock()
|
||||
|
||||
s.txnMarkSet = markSet
|
||||
|
||||
// and do it again while holding the lock to mark references that might have been created
|
||||
// in the meantime and avoid races of the type Has->txnRef->enterCS->Get fails because
|
||||
// it's not in the markset
|
||||
if err := s.protectTxnRefs(markSet); err != nil {
|
||||
return xerrors.Errorf("error protecting transactional references: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}

func (s *SplitStore) waitForSync() {
	log.Info("waiting for sync")
	startWait := time.Now()
	defer func() {
		log.Infow("waiting for sync done", "took", time.Since(startWait))
	}()

	s.txnSyncMx.Lock()
	defer s.txnSyncMx.Unlock()

	for !s.txnSync {
		s.txnSyncCond.Wait()
	}
}

func (s *SplitStore) endTxnProtect() {
@ -616,8 +800,20 @@ func (s *SplitStore) endTxnProtect() {
	}

	s.txnActive = false
	s.txnSync = false
	s.txnRefs = nil
	s.txnMissing = nil
	s.txnMarkSet = nil
}

func (s *SplitStore) endCriticalSection() {
	log.Info("ending critical section")

	s.txnLk.Lock()
	defer s.txnLk.Unlock()

	s.txnMarkSet.EndCriticalSection()
	s.txnMarkSet = nil
}

func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
@ -857,7 +1053,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m
	return nil
}

// internal version used by walk
// internal version used during compaction and related operations
func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
	if isIdentiyCid(c) {
		data, err := decodeIdentityCid(c)
@ -892,10 +1088,34 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) {
	return s.cold.Has(s.ctx, c)
}

func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
func (s *SplitStore) get(c cid.Cid) (blocks.Block, error) {
	blk, err := s.hot.Get(s.ctx, c)
	switch err {
	case nil:
		return blk, nil
	case bstore.ErrNotFound:
		return s.cold.Get(s.ctx, c)
	default:
		return nil, err
	}
}

func (s *SplitStore) getSize(c cid.Cid) (int, error) {
	sz, err := s.hot.GetSize(s.ctx, c)
	switch err {
	case nil:
		return sz, nil
	case bstore.ErrNotFound:
		return s.cold.GetSize(s.ctx, c)
	default:
		return 0, err
	}
}

func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
	batch := make([]blocks.Block, 0, batchSize)

	for _, c := range cold {
	err := coldr.ForEach(func(c cid.Cid) error {
		if err := s.checkClosing(); err != nil {
			return err
		}
@ -904,7 +1124,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
		if err != nil {
			if err == bstore.ErrNotFound {
				log.Warnf("hotstore missing block %s", c)
				continue
				return nil
			}

			return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err)
@ -918,6 +1138,12 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
			}
			batch = batch[:0]
		}

		return nil
	})

	if err != nil {
		return xerrors.Errorf("error iterating coldset: %w", err)
	}

	if len(batch) > 0 {
@ -930,177 +1156,202 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
	return nil
}
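
// The batching shape above, reduced to a self-contained sketch (assumed names,
// not part of this commit): accumulate up to batchSize items from a streaming
// iterator, flush on the boundary, and flush the remainder at the end.
func copyBatched(forEach func(func(int) error) error, flush func([]int) error, batchSize int) error {
	batch := make([]int, 0, batchSize)
	err := forEach(func(v int) error {
		batch = append(batch, v)
		if len(batch) == batchSize {
			if err := flush(batch); err != nil {
				return err
			}
			batch = batch[:0]
		}
		return nil
	})
	if err != nil {
		return err
	}
	if len(batch) > 0 {
		return flush(batch) // don't drop the final partial batch
	}
	return nil
}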

// sorts a slice of objects heaviest first -- it's a little expensive but worth the
// guarantee that we don't leave dangling references behind, e.g. if we die in the middle
// of a purge.
func (s *SplitStore) sortObjects(cids []cid.Cid) error {
	// we cache the keys to avoid making a gazillion of strings
	keys := make(map[cid.Cid]string)
	key := func(c cid.Cid) string {
		s, ok := keys[c]
		if !ok {
			s = string(c.Hash())
			keys[c] = s
		}
		return s
	}

	// compute sorting weights as the cumulative number of DAG links
	weights := make(map[string]int)
	for _, c := range cids {
		// this can take quite a while, so check for shutdown with every opportunity
		if err := s.checkClosing(); err != nil {
			return err
		}

		w := s.getObjectWeight(c, weights, key)
		weights[key(c)] = w
	}

	// sort!
	sort.Slice(cids, func(i, j int) bool {
		wi := weights[key(cids[i])]
		wj := weights[key(cids[j])]
		if wi == wj {
			return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0
		}

		return wi > wj
	})

	return nil
}

func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int {
	w, ok := weights[key(c)]
	if ok {
		return w
	}

	// we treat block headers specially to avoid walking the entire chain
	var hdr types.BlockHeader
	err := s.view(c, func(data []byte) error {
		return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
	})
	if err == nil {
		w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key)
		weights[key(hdr.ParentStateRoot)] = w1

		w2 := s.getObjectWeight(hdr.Messages, weights, key)
		weights[key(hdr.Messages)] = w2

		return 1 + w1 + w2
	}

	var links []cid.Cid
	err = s.view(c, func(data []byte) error {
		return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
			links = append(links, c)
		})
	})
	if err != nil {
		return 1
	}

	w = 1
	for _, c := range links {
		// these are internal refs, so dags will be dags
		if c.Prefix().Codec != cid.DagCBOR {
			w++
			continue
		}

		wc := s.getObjectWeight(c, weights, key)
		weights[key(c)] = wc

		w += wc
	}

	return w
}
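
// Worked example for the weight computation above: a block header whose
// ParentStateRoot subtree weighs 10 and whose Messages subtree weighs 4 gets
// weight 1 + 10 + 4 = 15, so it sorts ahead of both subtrees and is purged
// first -- which is what keeps dangling references out of a half-finished purge.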

func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error {
	if len(cids) == 0 {
		return nil
	}

	// we don't delete one giant batch of millions of objects, but rather do smaller batches
	// so that we don't stop the world for an extended period of time
	done := false
	for i := 0; !done; i++ {
		start := i * batchSize
		end := start + batchSize
		if end >= len(cids) {
			end = len(cids)
			done = true
		}

		err := deleteBatch(cids[start:end])
		if err != nil {
			return xerrors.Errorf("error deleting batch: %w", err)
		}
	}

	return nil
}

func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error {
func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet MarkSet) error {
	batch := make([]cid.Cid, 0, batchSize)
	deadCids := make([]cid.Cid, 0, batchSize)

	var purgeCnt, liveCnt int
	defer func() {
		log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
	}()

	return s.purgeBatch(cids,
		func(cids []cid.Cid) error {
			deadCids := deadCids[:0]
	deleteBatch := func() error {
		pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)

			for {
				if err := s.checkClosing(); err != nil {
					return err
				}
		purgeCnt += pc
		liveCnt += lc
		batch = batch[:0]

				s.txnLk.Lock()
				if len(s.txnRefs) == 0 {
					// keep the lock!
					break
				}
		return err
	}

				// unlock and protect
				s.txnLk.Unlock()
	err := coldr.ForEach(func(c cid.Cid) error {
		batch = append(batch, c)
		if len(batch) == batchSize {
			return deleteBatch()
		}

				err := s.protectTxnRefs(markSet)
				if err != nil {
					return xerrors.Errorf("error protecting transactional refs: %w", err)
				}
		return nil
	})

	if err != nil {
		return err
	}

	if len(batch) > 0 {
		return deleteBatch()
	}

	return nil
}

func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoint, markSet MarkSet) (purgeCnt int, liveCnt int, err error) {
	if err := s.checkClosing(); err != nil {
		return 0, 0, err
	}

	s.txnLk.Lock()
	defer s.txnLk.Unlock()

	for _, c := range batch {
		has, err := markSet.Has(c)
		if err != nil {
			return 0, 0, xerrors.Errorf("error checking markset for liveness: %w", err)
		}

		if has {
			liveCnt++
			continue
		}

		deadCids = append(deadCids, c)
	}

	if len(deadCids) == 0 {
		if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
			return 0, 0, xerrors.Errorf("error setting checkpoint: %w", err)
		}

		return 0, liveCnt, nil
	}

	if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil {
		return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err)
	}

	s.debug.LogDelete(deadCids)
	purgeCnt = len(deadCids)

	if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
		return purgeCnt, liveCnt, xerrors.Errorf("error setting checkpoint: %w", err)
	}

	return purgeCnt, liveCnt, nil
}
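
// Note on the checkpoint ordering above: the batch is deleted (or found fully
// live) before checkpoint.Set records its last cid, so a crash can only cause
// an already-processed batch to be re-examined, never skipped. Re-deleting
// already-deleted blocks is harmless, which makes the purge idempotent across
// restarts.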

func (s *SplitStore) coldSetPath() string {
	return filepath.Join(s.path, "coldset")
}

func (s *SplitStore) checkpointPath() string {
	return filepath.Join(s.path, "checkpoint")
}

func (s *SplitStore) checkpointExists() bool {
	_, err := os.Stat(s.checkpointPath())
	return err == nil
}

func (s *SplitStore) completeCompaction() error {
	checkpoint, last, err := OpenCheckpoint(s.checkpointPath())
	if err != nil {
		return xerrors.Errorf("error opening checkpoint: %w", err)
	}
	defer checkpoint.Close() //nolint:errcheck

	coldr, err := NewColdSetReader(s.coldSetPath())
	if err != nil {
		return xerrors.Errorf("error opening coldset: %w", err)
	}
	defer coldr.Close() //nolint:errcheck

	markSet, err := s.markSetEnv.Recover("live")
	if err != nil {
		return xerrors.Errorf("error recovering markset: %w", err)
	}
	defer markSet.Close() //nolint:errcheck

	// PURGE
	log.Info("purging cold objects from the hotstore")
	startPurge := time.Now()
	err = s.completePurge(coldr, checkpoint, last, markSet)
	if err != nil {
		return xerrors.Errorf("error purging cold objects: %w", err)
	}
	log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))

	markSet.EndCriticalSection()

	if err := checkpoint.Close(); err != nil {
		log.Warnf("error closing checkpoint: %s", err)
	}
	if err := os.Remove(s.checkpointPath()); err != nil {
		log.Warnf("error removing checkpoint: %s", err)
	}
	if err := coldr.Close(); err != nil {
		log.Warnf("error closing coldset: %s", err)
	}
	if err := os.Remove(s.coldSetPath()); err != nil {
		log.Warnf("error removing coldset: %s", err)
	}

	// Note: at this point we can start the splitstore; a compaction should run on
	// the first head change, which will trigger gc on the hotstore.
	// We don't mind the second (back-to-back) compaction as the head will
	// have advanced during marking and coldset accumulation.
	return nil
}

func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, start cid.Cid, markSet MarkSet) error {
	if !start.Defined() {
		return s.purge(coldr, checkpoint, markSet)
	}

	seeking := true
	batch := make([]cid.Cid, 0, batchSize)
	deadCids := make([]cid.Cid, 0, batchSize)

	var purgeCnt, liveCnt int
	defer func() {
		log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
	}()

	deleteBatch := func() error {
		pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)

		purgeCnt += pc
		liveCnt += lc
		batch = batch[:0]

		return err
	}

	err := coldr.ForEach(func(c cid.Cid) error {
		if seeking {
			if start.Equals(c) {
				seeking = false
			}

			defer s.txnLk.Unlock()

			for _, c := range cids {
				live, err := markSet.Has(c)
				if err != nil {
					return xerrors.Errorf("error checking for liveness: %w", err)
				}

				if live {
					liveCnt++
					continue
				}

				deadCids = append(deadCids, c)
			}

			err := s.hot.DeleteMany(s.ctx, deadCids)
			if err != nil {
				return xerrors.Errorf("error purging cold objects: %w", err)
			}

			s.debug.LogDelete(deadCids)

			purgeCnt += len(deadCids)
			return nil
		})
		}

		batch = append(batch, c)
		if len(batch) == batchSize {
			return deleteBatch()
		}

		return nil
	})

	if err != nil {
		return err
	}

	if len(batch) > 0 {
		return deleteBatch()
	}

	return nil
}
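
// The seek-and-resume pattern above, reduced to a sketch (assumed names, not
// part of this commit): skip forward through the replayed stream until the
// checkpointed element is seen, then resume processing after it.
func resumeAfter(items []int, checkpoint int, process func(int) error) error {
	seeking := true
	for _, v := range items {
		if seeking {
			if v == checkpoint {
				seeking = false // everything up to and including this was already processed
			}
			continue
		}
		if err := process(v); err != nil {
			return err
		}
	}
	return nil
}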

// I really don't like having this code, but we seem to have some occasional DAG references with

blockstore/splitstore/splitstore_reify.go (new file, 214 lines)
@ -0,0 +1,214 @@
package splitstore

import (
	"errors"
	"runtime"
	"sync/atomic"

	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

var (
	errReifyLimit = errors.New("reification limit reached")
	ReifyLimit    = 16384
)

func (s *SplitStore) reifyColdObject(c cid.Cid) {
	if !s.isWarm() {
		return
	}

	if isUnitaryObject(c) {
		return
	}

	s.reifyMx.Lock()
	defer s.reifyMx.Unlock()

	_, ok := s.reifyInProgress[c]
	if ok {
		return
	}

	s.reifyPend[c] = struct{}{}
	s.reifyCond.Broadcast()
}

func (s *SplitStore) reifyOrchestrator() {
	workers := runtime.NumCPU() / 4
	if workers < 2 {
		workers = 2
	}

	workch := make(chan cid.Cid, workers)
	defer close(workch)

	for i := 0; i < workers; i++ {
		s.reifyWorkers.Add(1)
		go s.reifyWorker(workch)
	}

	for {
		s.reifyMx.Lock()
		for len(s.reifyPend) == 0 && atomic.LoadInt32(&s.closing) == 0 {
			s.reifyCond.Wait()
		}

		if atomic.LoadInt32(&s.closing) != 0 {
			s.reifyMx.Unlock()
			return
		}

		reifyPend := s.reifyPend
		s.reifyPend = make(map[cid.Cid]struct{})
		s.reifyMx.Unlock()

		for c := range reifyPend {
			select {
			case workch <- c:
			case <-s.ctx.Done():
				return
			}
		}
	}
}
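
// Design note on the loop above: the pending set is swapped out under the lock
// and drained into the channel outside it, so producers calling
// reifyColdObject are never blocked behind slow workers; they only contend on
// a map insert and a Broadcast.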

func (s *SplitStore) reifyWorker(workch chan cid.Cid) {
	defer s.reifyWorkers.Done()
	for c := range workch {
		s.doReify(c)
	}
}

func (s *SplitStore) doReify(c cid.Cid) {
	var toreify, totrack, toforget []cid.Cid

	defer func() {
		s.reifyMx.Lock()
		defer s.reifyMx.Unlock()

		for _, c := range toreify {
			delete(s.reifyInProgress, c)
		}
		for _, c := range totrack {
			delete(s.reifyInProgress, c)
		}
		for _, c := range toforget {
			delete(s.reifyInProgress, c)
		}
	}()

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	count := 0
	err := s.walkObjectIncomplete(c, newTmpVisitor(),
		func(c cid.Cid) error {
			if isUnitaryObject(c) {
				return errStopWalk
			}

			count++
			if count > ReifyLimit {
				return errReifyLimit
			}

			s.reifyMx.Lock()
			_, inProgress := s.reifyInProgress[c]
			if !inProgress {
				s.reifyInProgress[c] = struct{}{}
			}
			s.reifyMx.Unlock()

			if inProgress {
				return errStopWalk
			}

			has, err := s.hot.Has(s.ctx, c)
			if err != nil {
				return xerrors.Errorf("error checking hotstore: %w", err)
			}

			if has {
				if s.txnMarkSet != nil {
					hasMark, err := s.txnMarkSet.Has(c)
					if err != nil {
						log.Warnf("error checking markset: %s", err)
					} else if hasMark {
						toforget = append(toforget, c)
						return errStopWalk
					}
				} else {
					totrack = append(totrack, c)
					return errStopWalk
				}
			}

			toreify = append(toreify, c)
			return nil
		},
		func(missing cid.Cid) error {
			log.Warnf("missing reference while reifying %s: %s", c, missing)
			return errStopWalk
		})

	if err != nil {
		if xerrors.Is(err, errReifyLimit) {
			log.Debug("reification aborted; reify limit reached")
			return
		}

		log.Warnf("error walking cold object for reification (cid: %s): %s", c, err)
		return
	}

	log.Debugf("reifying %d objects rooted at %s", len(toreify), c)

	// this should not get too big, maybe some 100s of objects.
	batch := make([]blocks.Block, 0, len(toreify))
	for _, c := range toreify {
		blk, err := s.cold.Get(s.ctx, c)
		if err != nil {
			log.Warnf("error retrieving cold object for reification (cid: %s): %s", c, err)
			continue
		}

		if err := s.checkClosing(); err != nil {
			return
		}

		batch = append(batch, blk)
	}

	if len(batch) > 0 {
		err = s.hot.PutMany(s.ctx, batch)
		if err != nil {
			log.Warnf("error reifying cold object (cid: %s): %s", c, err)
			return
		}
	}

	if s.txnMarkSet != nil {
		if len(toreify) > 0 {
			if err := s.txnMarkSet.MarkMany(toreify); err != nil {
				log.Warnf("error marking reified objects: %s", err)
			}
		}
		if len(totrack) > 0 {
			if err := s.txnMarkSet.MarkMany(totrack); err != nil {
				log.Warnf("error marking tracked objects: %s", err)
			}
		}
	} else {
		// if txnActive is false these are noops
		if len(toreify) > 0 {
			s.trackTxnRefMany(toreify)
		}
		if len(totrack) > 0 {
			s.trackTxnRefMany(totrack)
		}
	}
}
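
// How reification is triggered in practice: reads made through a context
// tagged with blockstore.WithHotView (see the TipSetExecutor change later in
// this commit, and the tests below) promote cold objects back into the
// hotstore. A minimal sketch:
//
//	ctx := blockstore.WithHotView(context.Background())
//	blk, err := splitstore.Get(ctx, c) // a cold hit here queues c for reification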

@ -4,6 +4,9 @@ import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"testing"
@ -20,12 +23,14 @@ import (
	datastore "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	logging "github.com/ipfs/go-log/v2"
	mh "github.com/multiformats/go-multihash"
)

func init() {
	CompactionThreshold = 5
	CompactionBoundary = 2
	WarmupBoundary = 0
	SyncWaitTime = time.Millisecond
	logging.SetLogLevel("splitstore", "DEBUG")
}

@ -80,8 +85,17 @@ func testSplitStore(t *testing.T, cfg *Config) {
		t.Fatal(err)
	}

	path, err := ioutil.TempDir("", "splitstore.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	// open the splitstore
	ss, err := Open("", ds, hot, cold, cfg)
	ss, err := Open(path, ds, hot, cold, cfg)
	if err != nil {
		t.Fatal(err)
	}
@ -125,6 +139,10 @@ func testSplitStore(t *testing.T, cfg *Config) {
	}

	waitForCompaction := func() {
		ss.txnSyncMx.Lock()
		ss.txnSync = true
		ss.txnSyncCond.Broadcast()
		ss.txnSyncMx.Unlock()
		for atomic.LoadInt32(&ss.compacting) == 1 {
			time.Sleep(100 * time.Millisecond)
		}
@ -259,8 +277,17 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
		t.Fatal(err)
	}

	path, err := ioutil.TempDir("", "splitstore.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	// open the splitstore
	ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"})
	ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
	if err != nil {
		t.Fatal(err)
	}
@ -305,6 +332,10 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
	}

	waitForCompaction := func() {
		ss.txnSyncMx.Lock()
		ss.txnSync = true
		ss.txnSyncCond.Broadcast()
		ss.txnSyncMx.Unlock()
		for atomic.LoadInt32(&ss.compacting) == 1 {
			time.Sleep(100 * time.Millisecond)
		}
@ -357,6 +388,235 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
	}
}

func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	hot := newMockStore()
	cold := newMockStore()

	mkRandomBlock := func() blocks.Block {
		data := make([]byte, 128)
		_, err := rand.Read(data)
		if err != nil {
			t.Fatal(err)
		}

		return blocks.NewBlock(data)
	}

	block1 := mkRandomBlock()
	block2 := mkRandomBlock()
	block3 := mkRandomBlock()

	hdr := mock.MkBlock(nil, 0, 0)
	hdr.Messages = block1.Cid()
	hdr.ParentMessageReceipts = block2.Cid()
	hdr.ParentStateRoot = block3.Cid()
	block4, err := hdr.ToStorageBlock()
	if err != nil {
		t.Fatal(err)
	}

	allBlocks := []blocks.Block{block1, block2, block3, block4}
	for _, blk := range allBlocks {
		err := cold.Put(context.Background(), blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	path, err := ioutil.TempDir("", "splitstore.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
	if err != nil {
		t.Fatal(err)
	}
	defer ss.Close() //nolint

	ss.warmupEpoch = 1
	go ss.reifyOrchestrator()

	waitForReification := func() {
		for {
			ss.reifyMx.Lock()
			ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
			ss.reifyMx.Unlock()

			if ready {
				return
			}

			time.Sleep(time.Millisecond)
		}
	}

	// first access using the standard view
	err = f(context.Background(), ss, block4.Cid())
	if err != nil {
		t.Fatal(err)
	}

	// nothing should be reified
	waitForReification()
	for _, blk := range allBlocks {
		has, err := hot.Has(context.Background(), blk.Cid())
		if err != nil {
			t.Fatal(err)
		}

		if has {
			t.Fatal("block unexpectedly reified")
		}
	}

	// now make the hot/reifying view and ensure access reifies
	err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
	if err != nil {
		t.Fatal(err)
	}

	// everything should be reified
	waitForReification()
	for i, blk := range allBlocks {
		has, err := hot.Has(context.Background(), blk.Cid())
		if err != nil {
			t.Fatal(err)
		}

		if !has {
			t.Fatalf("block%d was not reified", i+1)
		}
	}
}

func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	hot := newMockStore()
	cold := newMockStore()

	mkRandomBlock := func() blocks.Block {
		data := make([]byte, 128)
		_, err := rand.Read(data)
		if err != nil {
			t.Fatal(err)
		}

		return blocks.NewBlock(data)
	}

	block1 := mkRandomBlock()
	block2 := mkRandomBlock()
	block3 := mkRandomBlock()

	hdr := mock.MkBlock(nil, 0, 0)
	hdr.Messages = block1.Cid()
	hdr.ParentMessageReceipts = block2.Cid()
	hdr.ParentStateRoot = block3.Cid()
	block4, err := hdr.ToStorageBlock()
	if err != nil {
		t.Fatal(err)
	}

	allBlocks := []blocks.Block{block1, block2, block3, block4}
	for _, blk := range allBlocks {
		err := cold.Put(context.Background(), blk)
		if err != nil {
			t.Fatal(err)
		}
	}

	path, err := ioutil.TempDir("", "splitstore.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
	if err != nil {
		t.Fatal(err)
	}
	defer ss.Close() //nolint

	ss.warmupEpoch = 1
	go ss.reifyOrchestrator()

	waitForReification := func() {
		for {
			ss.reifyMx.Lock()
			ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
			ss.reifyMx.Unlock()

			if ready {
				return
			}

			time.Sleep(time.Millisecond)
		}
	}

	// do a hot access -- nothing should be reified as the limit should be exceeded
	oldReifyLimit := ReifyLimit
	ReifyLimit = 2
	t.Cleanup(func() {
		ReifyLimit = oldReifyLimit
	})

	err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
	if err != nil {
		t.Fatal(err)
	}

	waitForReification()

	for _, blk := range allBlocks {
		has, err := hot.Has(context.Background(), blk.Cid())
		if err != nil {
			t.Fatal(err)
		}

		if has {
			t.Fatal("block unexpectedly reified")
		}
	}

}

func TestSplitStoreReification(t *testing.T) {
	t.Log("test reification with Has")
	testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
		_, err := s.Has(ctx, c)
		return err
	})
	t.Log("test reification with Get")
	testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
		_, err := s.Get(ctx, c)
		return err
	})
	t.Log("test reification with GetSize")
	testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
		_, err := s.GetSize(ctx, c)
		return err
	})
	t.Log("test reification with View")
	testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
		return s.View(ctx, c, func(_ []byte) error { return nil })
	})
	t.Log("test reification limit")
	testSplitStoreReificationLimit(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
		_, err := s.Has(ctx, c)
		return err
	})
}

type mockChain struct {
	t testing.TB

@ -426,17 +686,25 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app

type mockStore struct {
	mx sync.Mutex
	set map[cid.Cid]blocks.Block
	set map[string]blocks.Block
}

func newMockStore() *mockStore {
	return &mockStore{set: make(map[cid.Cid]blocks.Block)}
	return &mockStore{set: make(map[string]blocks.Block)}
}

func (b *mockStore) keyOf(c cid.Cid) string {
	return string(c.Hash())
}

func (b *mockStore) cidOf(k string) cid.Cid {
	return cid.NewCidV1(cid.Raw, mh.Multihash([]byte(k)))
}
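
// Why the mock now keys by multihash rather than cid.Cid: two CIDs that differ
// only in version or codec still name the same bytes, so keying the map by
// string(c.Hash()) makes Has/Get agree across CID variants, mirroring how the
// real blockstores behave. cidOf reverses the mapping (as a raw-codec v1 CID)
// for ForEachKey.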

func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) {
	b.mx.Lock()
	defer b.mx.Unlock()
	_, ok := b.set[cid]
	_, ok := b.set[b.keyOf(cid)]
	return ok, nil
}

@ -446,7 +714,7 @@ func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
	b.mx.Lock()
	defer b.mx.Unlock()

	blk, ok := b.set[cid]
	blk, ok := b.set[b.keyOf(cid)]
	if !ok {
		return nil, blockstore.ErrNotFound
	}
@ -474,7 +742,7 @@ func (b *mockStore) Put(_ context.Context, blk blocks.Block) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	b.set[blk.Cid()] = blk
	b.set[b.keyOf(blk.Cid())] = blk
	return nil
}

@ -483,7 +751,7 @@ func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error {
	defer b.mx.Unlock()

	for _, blk := range blks {
		b.set[blk.Cid()] = blk
		b.set[b.keyOf(blk.Cid())] = blk
	}
	return nil
}
@ -492,7 +760,7 @@ func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	delete(b.set, cid)
	delete(b.set, b.keyOf(cid))
	return nil
}

@ -501,7 +769,7 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
	defer b.mx.Unlock()

	for _, c := range cids {
		delete(b.set, c)
		delete(b.set, b.keyOf(c))
	}
	return nil
}
@ -515,7 +783,7 @@ func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
	defer b.mx.Unlock()

	for c := range b.set {
		err := f(c)
		err := f(b.cidOf(c))
		if err != nil {
			return err
		}

@ -62,7 +62,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
	xcount := new(int64)
	missing := new(int64)

	visitor, err := s.markSetEnv.Create("warmup", 0)
	visitor, err := s.markSetEnv.New("warmup", 0)
	if err != nil {
		return xerrors.Errorf("error creating visitor: %w", err)
	}

@ -26,6 +26,10 @@ type tmpVisitor struct {
var _ ObjectVisitor = (*tmpVisitor)(nil)

func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
	if isUnitaryObject(c) {
		return false, nil
	}

	return v.set.Visit(c), nil
}

@ -45,6 +49,10 @@ func newConcurrentVisitor() *concurrentVisitor {
}

func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
	if isUnitaryObject(c) {
		return false, nil
	}

	v.mx.Lock()
	defer v.mx.Unlock()

@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt

Binary file not shown.
Binary file not shown.
Binary file not shown.

@ -42,8 +42,7 @@ const UpgradeTurboHeight = -15
const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = -17

// 2022-01-17T19:00:00Z
const UpgradeOhSnapHeight = 30262
const UpgradeOhSnapHeight = 240

func init() {
	policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))

@ -54,7 +54,8 @@ const UpgradeHyperdriveHeight = 420

const UpgradeChocolateHeight = 312746

const UpgradeOhSnapHeight = 99999999
// 2022-02-10T19:23:00Z
const UpgradeOhSnapHeight = 682006

func init() {
	policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

@ -67,7 +67,8 @@ const UpgradeHyperdriveHeight = 892800
// 2021-10-26T13:30:00Z
const UpgradeChocolateHeight = 1231620

var UpgradeOhSnapHeight = abi.ChainEpoch(999999999999)
// 2022-03-01T15:00:00Z
var UpgradeOhSnapHeight = abi.ChainEpoch(1594680)

func init() {
	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {

@ -13,6 +13,17 @@ import (

func BlocksTopic(netName dtypes.NetworkName) string   { return "/fil/blocks/" + string(netName) }
func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
func IndexerIngestTopic(netName dtypes.NetworkName) string {

	nn := string(netName)
	// The network name testnetnet is here for historical reasons.
	// Going forward we aim to use the name `mainnet` where possible.
	if nn == "testnetnet" {
		nn = "mainnet"
	}

	return "/indexer/ingest/" + nn
}
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
	return protocol.ID("/fil/kad/" + string(netName))
}
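
// Worked examples for the topic builders above: on mainnet (whose network name
// is still "testnetnet") IndexerIngestTopic yields "/indexer/ingest/mainnet",
// while BlocksTopic keeps the historical name and yields
// "/fil/blocks/testnetnet".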

@ -1,4 +1,54 @@
{
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": {
    "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q",
    "digest": "c3ad7bb549470b82ad52ed070aebb4f4",
    "sector_size": 536870912
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": {
    "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv",
    "digest": "994c5b7d450ca9da348c910689f2dc7f",
    "sector_size": 536870912
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": {
    "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S",
    "digest": "5aedd2cf3e5c0a15623d56a1b43110ad",
    "sector_size": 8388608
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": {
    "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i",
    "digest": "abd80269054d391a734febdac0d2e687",
    "sector_size": 8388608
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": {
    "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9",
    "digest": "311f92a3e75036ced01b1c0025f1fa0c",
    "sector_size": 2048
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": {
    "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P",
    "digest": "eadad9784969890d30f2749708c79771",
    "sector_size": 2048
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": {
    "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS",
    "digest": "1b3cfd761a961543f9eb273e435a06a2",
    "sector_size": 34359738368
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": {
    "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN",
    "digest": "3a6941983754737fde880d29c7094905",
    "sector_size": 34359738368
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": {
    "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp",
    "digest": "1a392e7b759fb18e036c7559b5ece816",
    "sector_size": 68719476736
  },
  "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": {
    "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg",
    "digest": "80e366df2f1011953c2d01c7b7c9ee8e",
    "sector_size": 68719476736
  },
  "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
    "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
    "digest": "7610b9f82bfc88405b7a832b651ce2f6",

@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.15.0-dev"
const BuildVersion = "1.15.1-dev"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

@ -16,6 +16,7 @@ import (
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
)

func init() {
@ -62,6 +63,11 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}

type RemoveDataCapProposal = verifreg{{.latestVersion}}.RemoveDataCapProposal
type RemoveDataCapRequest = verifreg{{.latestVersion}}.RemoveDataCapRequest
type RemoveDataCapParams = verifreg{{.latestVersion}}.RemoveDataCapParams
type RmDcProposalID = verifreg{{.latestVersion}}.RmDcProposalID
const SignatureDomainSeparation_RemoveDataCap = verifreg{{.latestVersion}}.SignatureDomainSeparation_RemoveDataCap

type State interface {
	cbor.Marshaler
@ -69,6 +75,7 @@ type State interface {
	RootKey() (address.Address, error)
	VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
	VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
	RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
	ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
	ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
	GetState() interface{}

@ -61,6 +61,10 @@ func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePo
	return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr)
}

func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb)
}
@ -77,6 +81,11 @@ func (s *state{{.v}}) verifiers() (adt.Map, error) {
	return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
}

func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) {
	{{if le .v 6}}return nil, nil
	{{else}}return adt{{.v}}.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}}
}

func (s *state{{.v}}) GetState() interface{} {
	return &s.State
}

@ -6,6 +6,7 @@ import (
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
	"golang.org/x/xerrors"
)

@ -50,3 +51,28 @@ func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr
		return cb(a, dcap)
	})
}

func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) {
	if verifier.Protocol() != address.ID {
		return false, 0, xerrors.Errorf("can only look up ID addresses")
	}
	if client.Protocol() != address.ID {
		return false, 0, xerrors.Errorf("can only look up ID addresses")
	}
	vh, err := root()
	if err != nil {
		return false, 0, xerrors.Errorf("loading verifreg: %w", err)
	}
	if vh == nil {
		return false, 0, xerrors.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors")
	}

	var id verifreg.RmDcProposalID
	if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil {
		return false, 0, xerrors.Errorf("looking up addr pair: %w", err)
	} else if !found {
		return false, 0, nil
	}

	return true, id.ProposalID, nil
}
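
// A sketch of how a caller might use the lookup above through the State
// interface (assumed wiring, not part of this commit): the stored proposal ID
// feeds the RemoveDataCapProposal that a verifier signs.
func lookupProposalID(st State, verifier, client address.Address) (uint64, error) {
	found, id, err := st.RemoveDataCapProposalID(verifier, client)
	if err != nil || !found {
		return 0, err // 0 when no proposal has been recorded for this pair yet
	}
	return id, nil
}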

@ -53,6 +53,10 @@ func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version0, s.verifiers, addr)
}

func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version0, s.verifiers, cb)
}
@ -69,6 +73,11 @@ func (s *state0) verifiers() (adt.Map, error) {
	return adt0.AsMap(s.store, s.Verifiers)
}

func (s *state0) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state0) GetState() interface{} {
	return &s.State
}

@ -53,6 +53,10 @@ func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version2, s.verifiers, addr)
}

func (s *state2) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version2, s.verifiers, cb)
}
@ -69,6 +73,11 @@ func (s *state2) verifiers() (adt.Map, error) {
	return adt2.AsMap(s.store, s.Verifiers)
}

func (s *state2) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state2) GetState() interface{} {
	return &s.State
}

@ -54,6 +54,10 @@ func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version3, s.verifiers, addr)
}

func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version3, s.verifiers, cb)
}
@ -70,6 +74,11 @@ func (s *state3) verifiers() (adt.Map, error) {
	return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
}

func (s *state3) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state3) GetState() interface{} {
	return &s.State
}

@ -54,6 +54,10 @@ func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version4, s.verifiers, addr)
}

func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version4, s.verifiers, cb)
}
@ -70,6 +74,11 @@ func (s *state4) verifiers() (adt.Map, error) {
	return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth)
}

func (s *state4) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state4) GetState() interface{} {
	return &s.State
}

@ -54,6 +54,10 @@ func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version5, s.verifiers, addr)
}

func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version5, s.verifiers, cb)
}
@ -70,6 +74,11 @@ func (s *state5) verifiers() (adt.Map, error) {
	return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth)
}

func (s *state5) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state5) GetState() interface{} {
	return &s.State
}

@ -54,6 +54,10 @@ func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version6, s.verifiers, addr)
}

func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version6, s.verifiers, cb)
}
@ -70,6 +74,11 @@ func (s *state6) verifiers() (adt.Map, error) {
	return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth)
}

func (s *state6) removeDataCapProposalIDs() (adt.Map, error) {
	return nil, nil

}

func (s *state6) GetState() interface{} {
	return &s.State
}

@ -54,6 +54,10 @@ func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower,
	return getDataCap(s.store, actors.Version7, s.verifiers, addr)
}

func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
	return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client)
}

func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
	return forEachCap(s.store, actors.Version7, s.verifiers, cb)
}
@ -70,6 +74,10 @@ func (s *state7) verifiers() (adt.Map, error) {
	return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth)
}

func (s *state7) removeDataCapProposalIDs() (adt.Map, error) {
	return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth)
}

func (s *state7) GetState() interface{} {
	return &s.State
}

@ -27,6 +27,7 @@ import (
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/types"
	verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
)

func init() {
@ -151,12 +152,20 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
	return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}

type RemoveDataCapProposal = verifreg7.RemoveDataCapProposal
type RemoveDataCapRequest = verifreg7.RemoveDataCapRequest
type RemoveDataCapParams = verifreg7.RemoveDataCapParams
type RmDcProposalID = verifreg7.RmDcProposalID

const SignatureDomainSeparation_RemoveDataCap = verifreg7.SignatureDomainSeparation_RemoveDataCap

type State interface {
	cbor.Marshaler

	RootKey() (address.Address, error)
	VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
	VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
	RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error)
	ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error
	ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error
	GetState() interface{}

@ -142,7 +142,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re

	go func() {
		start := build.Clock.Now()
		log.Infow("start fetching randomness", "round", round)
		log.Debugw("start fetching randomness", "round", round)
		resp, err := db.client.Get(ctx, round)

		var br beacon.Response
@ -152,7 +152,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
			br.Entry.Round = resp.Round()
			br.Entry.Data = resp.Signature()
		}
		log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start))
		log.Debugw("done fetching randomness", "round", round, "took", build.Clock.Since(start))
		out <- br
		close(out)
	}()

@ -32,6 +32,7 @@ import (

	/* inline-gen end */

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -92,6 +93,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
		partDone()
	}()

	ctx = blockstore.WithHotView(ctx)
	makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) {
		vmopt := &vm.VMOpts{
			StateBase: base,

@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
	}

	nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
	pl := vm.PricelistByEpoch(baseTs.Height())
	pl := vm.PricelistByEpoch(b.Header.Height)
	var sumGasLimit int64
	checkMsg := func(msg types.ChainMsg) error {
		m := msg.VMMessage()

@ -165,13 +165,8 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
		Migration: UpgradeActorsV7,
		PreMigrations: []stmgr.PreMigration{{
			PreMigration: PreUpgradeActorsV7,
			StartWithin:  120,
			StartWithin:  180,
			DontStartWithin: 60,
			StopWithin:      35,
		}, {
			PreMigration: PreUpgradeActorsV7,
			StartWithin:  30,
			DontStartWithin: 15,
			StopWithin:      5,
		}},
		Expensive: true,
@ -1264,7 +1259,7 @@ func upgradeActorsV7Common(
	root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
	config nv15.Config,
) (cid.Cid, error) {
	writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB)
	writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
	// TODO: pretty sure we'd achieve nothing by doing this, confirm in review
	//buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore)
	store := store.ActorStore(ctx, writeStore)

@ -106,7 +106,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
	curTs := mp.curTs
	mp.curTsLk.Unlock()

	epoch := curTs.Height()
	epoch := curTs.Height() + 1

	var baseFee big.Int
	if len(curTs.Blocks()) > 0 {

chain/messagepool/check_test.go (new file, 224 lines)
@ -0,0 +1,224 @@
//stm: #unit
package messagepool

import (
	"context"
	"fmt"
	"testing"

	"github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/assert"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/mock"
	"github.com/filecoin-project/lotus/chain/wallet"
	_ "github.com/filecoin-project/lotus/lib/sigs/bls"
	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
)

func init() {
	_ = logging.SetLogLevel("*", "INFO")
}

func getCheckMessageStatus(statusCode api.CheckStatusCode, msgStatuses []api.MessageCheckStatus) (*api.MessageCheckStatus, error) {
	for i := 0; i < len(msgStatuses); i++ {
		iMsgStatuses := msgStatuses[i]
		if iMsgStatuses.CheckStatus.Code == statusCode {
			return &iMsgStatuses, nil
		}
	}
	return nil, fmt.Errorf("Could not find CheckStatusCode %s", statusCode)
}

func TestCheckMessages(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_CHECK_MESSAGES_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	if err != nil {
		t.Fatal(err)
	}

	sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	tma.setBalance(sender, 1000e15)
	target := mock.Address(1001)

	var protos []*api.MessagePrototype
	for i := 0; i < 5; i++ {
		msg := &types.Message{
			To:         target,
			From:       sender,
			Value:      types.NewInt(1),
			Nonce:      uint64(i),
			GasLimit:   50000000,
			GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
			GasPremium: types.NewInt(1),
			Params:     make([]byte, 2<<10),
		}
		proto := &api.MessagePrototype{
			Message:    *msg,
			ValidNonce: true,
		}
		protos = append(protos, proto)
	}

	messageStatuses, err := mp.CheckMessages(context.TODO(), protos)
	assert.NoError(t, err)
	for i := 0; i < len(messageStatuses); i++ {
		iMsgStatuses := messageStatuses[i]
		for j := 0; j < len(iMsgStatuses); j++ {
			jStatus := iMsgStatuses[j]
			assert.True(t, jStatus.OK)
		}
	}
}

func TestCheckPendingMessages(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_CHECK_PENDING_MESSAGES_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	if err != nil {
		t.Fatal(err)
	}

	sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	tma.setBalance(sender, 1000e15)
	target := mock.Address(1001)

	// add a valid message to the pool
	msg := &types.Message{
		To:         target,
		From:       sender,
		Value:      types.NewInt(1),
		Nonce:      0,
		GasLimit:   50000000,
		GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
		GasPremium: types.NewInt(1),
		Params:     make([]byte, 2<<10),
	}

	sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
	if err != nil {
		panic(err)
	}
	sm := &types.SignedMessage{
		Message:   *msg,
		Signature: *sig,
	}
	mustAdd(t, mp, sm)

	messageStatuses, err := mp.CheckPendingMessages(context.TODO(), sender)
	assert.NoError(t, err)
	for i := 0; i < len(messageStatuses); i++ {
		iMsgStatuses := messageStatuses[i]
		for j := 0; j < len(iMsgStatuses); j++ {
			jStatus := iMsgStatuses[j]
			assert.True(t, jStatus.OK)
		}
	}
}

func TestCheckReplaceMessages(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_CHECK_REPLACE_MESSAGES_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	if err != nil {
		t.Fatal(err)
	}

	sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	tma.setBalance(sender, 1000e15)
	target := mock.Address(1001)

	// add a valid message to the pool
	msg := &types.Message{
		To:         target,
		From:       sender,
		Value:      types.NewInt(1),
		Nonce:      0,
		GasLimit:   50000000,
		GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
		GasPremium: types.NewInt(1),
		Params:     make([]byte, 2<<10),
	}

	sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
	if err != nil {
		panic(err)
	}
	sm := &types.SignedMessage{
		Message:   *msg,
		Signature: *sig,
	}
	mustAdd(t, mp, sm)

	// create a new message with the same data, except that it is too big
	var msgs []*types.Message
	invalidmsg := &types.Message{
		To:         target,
		From:       sender,
		Value:      types.NewInt(1),
		Nonce:      0,
		GasLimit:   50000000,
		GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
		GasPremium: types.NewInt(1),
		Params:     make([]byte, 128<<10),
	}
	msgs = append(msgs, invalidmsg)

	{
		messageStatuses, err := mp.CheckReplaceMessages(context.TODO(), msgs)
		if err != nil {
			t.Fatal(err)
		}
		for i := 0; i < len(messageStatuses); i++ {
			iMsgStatuses := messageStatuses[i]

			status, err := getCheckMessageStatus(api.CheckStatusMessageSize, iMsgStatuses)
			if err != nil {
				t.Fatal(err)
			}
			// the replacement message should cause a status error
			assert.False(t, status.OK)
		}
	}

}

@ -628,7 +628,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// For non local messages, if the message cannot be included in the next 20 blocks it returns
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
	epoch := curTs.Height()
	epoch := curTs.Height() + 1
	minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
|
||||
|
@ -9,6 +9,7 @@ import (

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"
@ -226,6 +227,8 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
}

func TestMessagePool(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_GET_NONCE_001

	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
@ -327,6 +330,7 @@ func TestCheckMessageBig(t *testing.T) {
		Message:   *msg,
		Signature: *sig,
	}
	//stm: @CHAIN_MEMPOOL_PUSH_001
	err = mp.Add(context.TODO(), sm)
	assert.ErrorIs(t, err, ErrMessageTooBig)
}
@ -760,3 +764,302 @@ func TestUpdates(t *testing.T) {
		t.Fatal("expected closed channel, but got an update instead")
	}
}

func TestMessageBelowMinGasFee(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	// fee is just below minimum gas fee
	fee := minimumBaseFee.Uint64() - 1
	{
		msg := &types.Message{
			To:         to,
			From:       from,
			Value:      types.NewInt(1),
			Nonce:      0,
			GasLimit:   50000000,
			GasFeeCap:  types.NewInt(fee),
			GasPremium: types.NewInt(1),
			Params:     make([]byte, 32<<10),
		}

		sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
		if err != nil {
			panic(err)
		}
		sm := &types.SignedMessage{
			Message:   *msg,
			Signature: *sig,
		}
		err = mp.Add(context.TODO(), sm)
		assert.ErrorIs(t, err, ErrGasFeeCapTooLow)
	}
}

func TestMessageValueTooHigh(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	totalFil := types.TotalFilecoinInt
	extra := types.NewInt(1)

	value := types.BigAdd(totalFil, extra)
	{
		msg := &types.Message{
			To:         to,
			From:       from,
			Value:      value,
			Nonce:      0,
			GasLimit:   50000000,
			GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
			GasPremium: types.NewInt(1),
			Params:     make([]byte, 32<<10),
		}

		sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
		if err != nil {
			panic(err)
		}
		sm := &types.SignedMessage{
			Message:   *msg,
			Signature: *sig,
		}

		err = mp.Add(context.TODO(), sm)
		assert.Error(t, err)
	}
}

func TestMessageSignatureInvalid(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		msg := &types.Message{
			To:         to,
			From:       from,
			Value:      types.NewInt(1),
			Nonce:      0,
			GasLimit:   50000000,
			GasFeeCap:  types.NewInt(minimumBaseFee.Uint64()),
			GasPremium: types.NewInt(1),
			Params:     make([]byte, 32<<10),
		}

		badSig := &crypto.Signature{
			Type: crypto.SigTypeSecp256k1,
			Data: make([]byte, 0),
		}
		sm := &types.SignedMessage{
			Message:   *msg,
			Signature: *badSig,
		}
		err = mp.Add(context.TODO(), sm)
		// assert.Contains(t, err.Error(), "invalid signature length")
		assert.Error(t, err)
	}
}

func TestAddMessageTwice(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		// create a valid message
		sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
		mustAdd(t, mp, sm)

		// try to add it twice
		err = mp.Add(context.TODO(), sm)
		// assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
		assert.Error(t, err)
	}
}

func TestAddMessageTwiceNonceGap(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		// create message with invalid nonce (1)
		sm := makeTestMessage(w, from, to, 1, 50_000_000, minimumBaseFee.Uint64())
		mustAdd(t, mp, sm)

		// then try to add message again
		err = mp.Add(context.TODO(), sm)
		// assert.Contains(t, err.Error(), "unfulfilled nonce gap")
		assert.Error(t, err)
	}
}

func TestAddMessageTwiceCidDiff(t *testing.T) {
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
		mustAdd(t, mp, sm)

		// Create message with different data, so CID is different
		sm2 := makeTestMessage(w, from, to, 0, 50_000_001, minimumBaseFee.Uint64())

		//stm: @CHAIN_MEMPOOL_PUSH_001
		// then try to add message again
		err = mp.Add(context.TODO(), sm2)
		// assert.Contains(t, err.Error(), "replace by fee has too low GasPremium")
		assert.Error(t, err)
	}
}

func TestAddMessageTwiceCidDiffReplaced(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
		mustAdd(t, mp, sm)

		// Create message with different data, so CID is different
		sm2 := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()*2)
		mustAdd(t, mp, sm2)
	}
}

func TestRemoveMessage(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_PUSH_001
	tma := newTestMpoolAPI()

	w, err := wallet.NewWallet(wallet.NewMemKeyStore())
	assert.NoError(t, err)

	from, err := w.WalletNew(context.Background(), types.KTBLS)
	assert.NoError(t, err)

	tma.setBalance(from, 1000e9)

	ds := datastore.NewMapDatastore()

	mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
	assert.NoError(t, err)

	to := mock.Address(1001)

	{
		sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
		mustAdd(t, mp, sm)

		//stm: @CHAIN_MEMPOOL_REMOVE_001
		// remove message for sender
		mp.Remove(context.TODO(), from, sm.Message.Nonce, true)

		//stm: @CHAIN_MEMPOOL_PENDING_FOR_001
		// check messages in pool: should be none present
		msgs := mp.pendingFor(context.TODO(), from)
		assert.Len(t, msgs, 0)
	}
}
@ -1,3 +1,4 @@
//stm: #unit
package messagepool

import (
@ -16,6 +17,7 @@ import (
)

func TestRepubMessages(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001
	oldRepublishBatchDelay := RepublishBatchDelay
	RepublishBatchDelay = time.Microsecond
	defer func() {
@ -57,6 +59,7 @@ func TestRepubMessages(t *testing.T) {

	for i := 0; i < 10; i++ {
		m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
		//stm: @CHAIN_MEMPOOL_PUSH_001
		_, err := mp.Push(context.TODO(), m)
		if err != nil {
			t.Fatal(err)
@ -1,3 +1,4 @@
//stm: #unit
package messagepool

import (
@ -74,6 +75,8 @@ func makeTestMpool() (*MessagePool, *testMpoolAPI) {
}

func TestMessageChains(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001
	//stm: @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001
	mp, tma := makeTestMpool()

	// the actors
@ -310,6 +313,8 @@ func TestMessageChains(t *testing.T) {
}

func TestMessageChainSkipping(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001

	// regression test for chain skip bug

	mp, tma := makeTestMpool()
@ -382,6 +387,7 @@ func TestMessageChainSkipping(t *testing.T) {
}

func TestBasicMessageSelection(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	oldMaxNonceGap := MaxNonceGap
	MaxNonceGap = 1000
	defer func() {
@ -532,6 +538,7 @@ func TestBasicMessageSelection(t *testing.T) {
}

func TestMessageSelectionTrimmingGas(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -595,6 +602,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -641,6 +649,7 @@ func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -707,6 +716,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -788,6 +798,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
}

func TestPriorityMessageSelection(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -867,6 +878,7 @@ func TestPriorityMessageSelection(t *testing.T) {
}

func TestPriorityMessageSelection2(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -934,6 +946,7 @@ func TestPriorityMessageSelection2(t *testing.T) {
}

func TestPriorityMessageSelection3(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
	mp, tma := makeTestMpool()

	// the actors
@ -1028,6 +1041,8 @@ func TestPriorityMessageSelection3(t *testing.T) {
}

func TestOptimalMessageSelection1(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

	// this test uses just a single actor sending messages with a low tq
	// the chain dependent merging algorithm should pick messages from the actor
	// from the start
@ -1094,6 +1109,8 @@ func TestOptimalMessageSelection1(t *testing.T) {
}

func TestOptimalMessageSelection2(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

	// this test uses two actors sending messages to each other, with the first
	// actor paying (much) higher gas premium than the second.
	// We select with a low ticket quality; the chain dependent merging algorithm should pick
@ -1173,6 +1190,8 @@ func TestOptimalMessageSelection2(t *testing.T) {
}

func TestOptimalMessageSelection3(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

	// this test uses 10 actors sending a block of messages to each other, with the first
	// actors paying higher gas premium than the subsequent actors.
	// We select with a low ticket quality; the chain dependent merging algorithm should pick
@ -1416,6 +1435,8 @@ func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 {
}

func TestCompetitiveMessageSelectionExp(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

	if testing.Short() {
		t.Skip("skipping in short mode")
	}
@ -1439,6 +1460,8 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) {
}

func TestCompetitiveMessageSelectionZipf(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

	if testing.Short() {
		t.Skip("skipping in short mode")
	}
@ -1462,6 +1485,7 @@ func TestCompetitiveMessageSelectionZipf(t *testing.T) {
}

func TestGasReward(t *testing.T) {
	//stm: @CHAIN_MEMPOOL_GET_GAS_REWARD_001
	tests := []struct {
		Premium uint64
		FeeCap  uint64
@ -1494,6 +1518,8 @@ func TestGasReward(t *testing.T) {
}

func TestRealWorldSelection(t *testing.T) {
	//stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001

	// load test-messages.json.gz and rewrite the messages so that
	// 1) we map each real actor to a test actor so that we can sign the messages
	// 2) adjust the nonces so that they start from 0
@ -1,19 +1,24 @@
package sub

import (
	"bytes"
	"context"
	"fmt"
	"encoding/binary"
	"sync"
	"time"

	address "github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-legs/dtsync"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/sub/ratelimit"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node/impl/client"
	"github.com/filecoin-project/lotus/node/impl/full"
	lru "github.com/hashicorp/golang-lru"
	blocks "github.com/ipfs/go-block-format"
	bserv "github.com/ipfs/go-blockservice"
@ -168,12 +173,12 @@ func fetchCids(
	cidIndex := make(map[cid.Cid]int)
	for i, c := range cids {
		if c.Prefix() != msgCidPrefix {
			return fmt.Errorf("invalid msg CID: %s", c)
			return xerrors.Errorf("invalid msg CID: %s", c)
		}
		cidIndex[c] = i
	}
	if len(cids) != len(cidIndex) {
		return fmt.Errorf("duplicate CIDs in fetchCids input")
		return xerrors.Errorf("duplicate CIDs in fetchCids input")
	}

	for block := range bserv.GetBlocks(ctx, cids) {
@ -196,7 +201,7 @@ func fetchCids(
	if len(cidIndex) > 0 {
		err := ctx.Err()
		if err == nil {
			err = fmt.Errorf("failed to fetch %d messages for unknown reasons", len(cidIndex))
			err = xerrors.Errorf("failed to fetch %d messages for unknown reasons", len(cidIndex))
		}
		return err
	}
@ -444,3 +449,168 @@ func recordFailure(ctx context.Context, metric *stats.Int64Measure, failureType
	)
	stats.Record(ctx, metric.M(1))
}

type peerMsgInfo struct {
	peerID    peer.ID
	lastCid   cid.Cid
	lastSeqno uint64
	rateLimit *ratelimit.Window
	mutex     sync.Mutex
}

type IndexerMessageValidator struct {
	self peer.ID

	peerCache *lru.TwoQueueCache
	chainApi  full.ChainModuleAPI
	stateApi  full.StateModuleAPI
}

func NewIndexerMessageValidator(self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) *IndexerMessageValidator {
	peerCache, _ := lru.New2Q(8192)

	return &IndexerMessageValidator{
		self:      self,
		peerCache: peerCache,
		chainApi:  chainApi,
		stateApi:  stateApi,
	}
}

func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
	// This chain-node should not be publishing its own messages. These are
	// relayed from market-nodes. If a node appears to be local, reject it.
	if pid == v.self {
		log.Debug("ignoring indexer message from self")
		stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
		return pubsub.ValidationIgnore
	}
	originPeer := msg.GetFrom()
	if originPeer == v.self {
		log.Debug("ignoring indexer message originating from self")
		stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
		return pubsub.ValidationIgnore
	}

	idxrMsg := dtsync.Message{}
	err := idxrMsg.UnmarshalCBOR(bytes.NewBuffer(msg.Data))
	if err != nil {
		log.Errorw("Could not decode indexer pubsub message", "err", err)
		return pubsub.ValidationReject
	}
	if len(idxrMsg.ExtraData) == 0 {
		log.Debugw("ignoring message missing miner id", "peer", originPeer)
		return pubsub.ValidationIgnore
	}

	// Get miner info from lotus
	minerAddr, err := address.NewFromBytes(idxrMsg.ExtraData)
	if err != nil {
		log.Warnw("cannot parse extra data as miner address", "err", err, "extraData", idxrMsg.ExtraData)
		return pubsub.ValidationReject
	}

	minerID := minerAddr.String()
	msgCid := idxrMsg.Cid

	var msgInfo *peerMsgInfo
	val, ok := v.peerCache.Get(minerID)
	if !ok {
		msgInfo = &peerMsgInfo{}
	} else {
		msgInfo = val.(*peerMsgInfo)
	}

	// Lock this peer's message info.
	msgInfo.mutex.Lock()
	defer msgInfo.mutex.Unlock()

	if ok {
		// Reject replayed messages.
		seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
		if seqno <= msgInfo.lastSeqno {
			log.Debugf("ignoring replayed indexer message")
			return pubsub.ValidationIgnore
		}
		msgInfo.lastSeqno = seqno
	}

	if !ok || originPeer != msgInfo.peerID {
		// Check that the miner ID maps to the peer that sent the message.
		err = v.authenticateMessage(ctx, minerAddr, originPeer)
		if err != nil {
			log.Warnw("cannot authenticate message", "err", err, "peer", originPeer, "minerID", minerID)
			stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
			return pubsub.ValidationReject
		}
		msgInfo.peerID = originPeer
		if !ok {
			// Add msgInfo to cache only after being authenticated. If two
			// messages from the same peer are handled concurrently, there is a
			// small chance that one msgInfo could replace the other here when
			// the info is first cached. This is OK, so no need to prevent it.
			v.peerCache.Add(minerID, msgInfo)
		}
	}

	// See if message needs to be ignored due to rate limiting.
	if v.rateLimitPeer(msgInfo, msgCid) {
		return pubsub.ValidationIgnore
	}

	stats.Record(ctx, metrics.IndexerMessageValidationSuccess.M(1))
	return pubsub.ValidationAccept
}

func (v *IndexerMessageValidator) rateLimitPeer(msgInfo *peerMsgInfo, msgCid cid.Cid) bool {
	const (
		msgLimit        = 5
		msgTimeLimit    = 10 * time.Second
		repeatTimeLimit = 2 * time.Hour
	)

	timeWindow := msgInfo.rateLimit

	// Check overall message rate.
	if timeWindow == nil {
		timeWindow = ratelimit.NewWindow(msgLimit, msgTimeLimit)
		msgInfo.rateLimit = timeWindow
	} else if msgInfo.lastCid == msgCid {
		// Check if this is a repeat of the previous message data.
		if time.Since(timeWindow.Newest()) < repeatTimeLimit {
			log.Warnw("ignoring repeated indexer message", "sender", msgInfo.peerID)
			return true
		}
	}

	err := timeWindow.Add()
	if err != nil {
		log.Warnw("ignoring indexer message", "sender", msgInfo.peerID, "err", err)
		return true
	}

	msgInfo.lastCid = msgCid

	return false
}

func (v *IndexerMessageValidator) authenticateMessage(ctx context.Context, minerAddress address.Address, peerID peer.ID) error {
	ts, err := v.chainApi.ChainHead(ctx)
	if err != nil {
		return err
	}

	minerInfo, err := v.stateApi.StateMinerInfo(ctx, minerAddress, ts.Key())
	if err != nil {
		return err
	}

	if minerInfo.PeerId == nil {
		return xerrors.New("no peer id for miner")
	}
	if *minerInfo.PeerId != peerID {
		return xerrors.New("miner id does not map to peer that sent message")
	}

	return nil
}
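For context, a validator like this is typically wired into go-libp2p-pubsub with RegisterTopicValidator, so that Validate runs on every message before it is delivered to subscribers or forwarded to other peers. A hedged sketch (the helper function and topic name below are assumptions for illustration, not part of this diff):

// Sketch: callers supply an existing *pubsub.PubSub plus the node's
// ChainModuleAPI/StateModuleAPI; the topic name here is hypothetical.
func registerIndexerValidator(ps *pubsub.PubSub, self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) error {
	v := NewIndexerMessageValidator(self, chainApi, stateApi)
	return ps.RegisterTopicValidator("/indexer/ingest/mainnet", v.Validate)
}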
@ -2,13 +2,20 @@
package sub

import (
	"bytes"
	"context"
	"testing"

	address "github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-legs/dtsync"
	"github.com/filecoin-project/lotus/api/mocks"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/golang/mock/gomock"
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pb "github.com/libp2p/go-libp2p-pubsub/pb"
)

type getter struct {
@ -63,3 +70,65 @@ func TestFetchCidsWithDedup(t *testing.T) {
		t.Fatalf("there is a nil message: first %p, last %p", res[0], res[len(res)-1])
	}
}

func TestIndexerMessageValidator_Validate(t *testing.T) {
	validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
	if err != nil {
		t.Fatal(err)
	}
	tests := []struct {
		name           string
		selfPID        string
		senderPID      string
		extraData      []byte
		wantValidation pubsub.ValidationResult
	}{
		{
			name:           "invalid extra data is rejected",
			selfPID:        "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
			senderPID:      "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ",
			extraData:      []byte("f0127896"), // note, casting encoded address to byte is invalid.
			wantValidation: pubsub.ValidationReject,
		},
		{
			name:           "same sender and receiver is ignored",
			selfPID:        "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
			senderPID:      "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW",
			wantValidation: pubsub.ValidationIgnore,
		},
	}
	for _, tc := range tests {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			mc := gomock.NewController(t)
			node := mocks.NewMockFullNode(mc)
			subject := NewIndexerMessageValidator(peer.ID(tc.selfPID), node, node)
			message := dtsync.Message{
				Cid:       validCid,
				Addrs:     nil,
				ExtraData: tc.extraData,
			}
			buf := bytes.NewBuffer(nil)
			if err := message.MarshalCBOR(buf); err != nil {
				t.Fatal(err)
			}

			topic := "topic"
			pbm := &pb.Message{
				Data:  buf.Bytes(),
				Topic: &topic,
				From:  nil,
				Seqno: nil,
			}
			validate := subject.Validate(context.Background(), peer.ID(tc.senderPID), &pubsub.Message{
				Message:       pbm,
				ReceivedFrom:  peer.ID(tc.senderPID),
				ValidatorData: nil,
			})

			if validate != tc.wantValidation {
				t.Fatalf("expected %v but got %v", tc.wantValidation, validate)
			}
		})
	}
}
chain/sub/ratelimit/queue.go (new file)
@ -0,0 +1,89 @@
package ratelimit

import "errors"

var ErrRateLimitExceeded = errors.New("rate limit exceeded")

type queue struct {
	buf   []int64
	count int
	head  int
	tail  int
}

// cap returns the queue capacity
func (q *queue) cap() int {
	return len(q.buf)
}

// len returns the number of items in the queue
func (q *queue) len() int {
	return q.count
}

// push adds an element to the end of the queue.
func (q *queue) push(elem int64) error {
	if q.count == len(q.buf) {
		return ErrRateLimitExceeded
	}

	q.buf[q.tail] = elem
	// Calculate new tail position.
	q.tail = q.next(q.tail)
	q.count++
	return nil
}

// pop removes and returns the element from the front of the queue.
func (q *queue) pop() int64 {
	if q.count <= 0 {
		panic("pop from empty queue")
	}
	ret := q.buf[q.head]

	// Calculate new head position.
	q.head = q.next(q.head)
	q.count--

	return ret
}

// front returns the element at the front of the queue. This is the element
// that would be returned by pop(). This call panics if the queue is empty.
func (q *queue) front() int64 {
	if q.count <= 0 {
		panic("front() called when empty")
	}
	return q.buf[q.head]
}

// back returns the element at the back of the queue. This call panics if the
// queue is empty.
func (q *queue) back() int64 {
	if q.count <= 0 {
		panic("back() called when empty")
	}
	return q.buf[q.prev(q.tail)]
}

// prev returns the previous buffer position, wrapping around the buffer.
func (q *queue) prev(i int) int {
	if i == 0 {
		return len(q.buf) - 1
	}
	return (i - 1) % len(q.buf)
}

// next returns the next buffer position, wrapping around the buffer.
func (q *queue) next(i int) int {
	return (i + 1) % len(q.buf)
}

// truncate pops values that are less than or equal to the specified threshold.
func (q *queue) truncate(threshold int64) {
	for q.count != 0 && q.buf[q.head] <= threshold {
		// pop() without returning a value
		q.head = q.next(q.head)
		q.count--
	}
}
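The queue above is a fixed-capacity ring buffer: head and tail chase each other modulo len(buf), and truncate drops expired values from the front. A minimal sketch of how the pieces interact (illustrative only; it assumes it lives in this same ratelimit package, since queue is unexported):

func exampleQueue() {
	q := &queue{buf: make([]int64, 3)} // capacity 3

	for _, v := range []int64{10, 20, 30} {
		if err := q.push(v); err != nil {
			panic(err) // cannot happen: the queue is not yet full
		}
	}

	// The buffer is now full, so a fourth push reports ErrRateLimitExceeded.
	if err := q.push(40); err != ErrRateLimitExceeded {
		panic("expected rate limit error")
	}

	// Drop every value <= 20; only the newest element (30) survives.
	q.truncate(20)
	if q.len() != 1 || q.front() != 30 {
		panic("unexpected queue state")
	}
}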
chain/sub/ratelimit/window.go (new file)
@ -0,0 +1,70 @@
package ratelimit

import "time"

// Window is a time window for counting events within a span of time. The
// window slides forward in time so that it spans from the most recent event
// to size time in the past.
type Window struct {
	q    *queue
	size int64
}

// NewWindow creates a new Window that limits the number of events to a maximum
// count of events within a duration of time. The capacity sets the maximum
// number of events, and size sets the span of time over which the events are
// counted.
func NewWindow(capacity int, size time.Duration) *Window {
	return &Window{
		q: &queue{
			buf: make([]int64, capacity),
		},
		size: int64(size),
	}
}

// Add attempts to append a new timestamp into the current window. Previously
// added values that are not within `size` difference from the value being
// added are first removed. Add fails if adding the value would cause the
// window to exceed capacity.
func (w *Window) Add() error {
	now := time.Now().UnixNano()
	if w.Len() != 0 {
		w.q.truncate(now - w.size)
	}
	return w.q.push(now)
}

// Cap returns the maximum number of items the window can hold.
func (w *Window) Cap() int {
	return w.q.cap()
}

// Len returns the number of elements currently in the window.
func (w *Window) Len() int {
	return w.q.len()
}

// Span returns the distance from the first to the last item in the window.
func (w *Window) Span() time.Duration {
	if w.q.len() < 2 {
		return 0
	}
	return time.Duration(w.q.back() - w.q.front())
}

// Oldest returns the oldest timestamp in the window.
func (w *Window) Oldest() time.Time {
	if w.q.len() == 0 {
		return time.Time{}
	}
	return time.Unix(0, w.q.front())
}

// Newest returns the newest timestamp in the window.
func (w *Window) Newest() time.Time {
	if w.q.len() == 0 {
		return time.Time{}
	}
	return time.Unix(0, w.q.back())
}
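Window wraps the queue with wall-clock semantics: each Add records time.Now().UnixNano(), expires timestamps older than size, and then pushes, so the queue's capacity error doubles as the rate-limit signal. A short usage sketch (illustrative only; assumes a file in this package importing "time"):

func exampleWindow() {
	w := NewWindow(5, 10*time.Second) // at most 5 events per 10 seconds

	for i := 0; i < 5; i++ {
		if err := w.Add(); err != nil {
			panic(err) // not expected: the window has room for 5 events
		}
	}

	// A sixth event within the same 10-second span is rejected; once the
	// oldest timestamps age out of the window, Add succeeds again.
	if err := w.Add(); err != ErrRateLimitExceeded {
		panic("expected the sixth event to be rate limited")
	}
}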
chain/sub/ratelimit/window_test.go (new file)
@ -0,0 +1,61 @@
package ratelimit

import (
	"testing"
	"time"
)

func TestWindow(t *testing.T) {
	const (
		maxEvents = 3
		timeLimit = 100 * time.Millisecond
	)
	w := NewWindow(maxEvents, timeLimit)
	if w.Len() != 0 {
		t.Fatal("q.Len() =", w.Len(), "expect 0")
	}
	if w.Cap() != maxEvents {
		t.Fatal("q.Cap() =", w.Cap(), "expect 3")
	}
	if !w.Newest().IsZero() {
		t.Fatal("expected newest to be zero time with empty window")
	}
	if !w.Oldest().IsZero() {
		t.Fatal("expected oldest to be zero time with empty window")
	}
	if w.Span() != 0 {
		t.Fatal("expected span to be zero time with empty window")
	}

	var err error
	for i := 0; i < maxEvents; i++ {
		err = w.Add()
		if err != nil {
			t.Fatalf("cannot add event %d", i)
		}
	}
	if w.Len() != maxEvents {
		t.Fatalf("q.Len() is %d, expected %d", w.Len(), maxEvents)
	}
	if err = w.Add(); err != ErrRateLimitExceeded {
		t.Fatalf("add event %d within time limit should have failed with err: %s", maxEvents+1, ErrRateLimitExceeded)
	}

	time.Sleep(timeLimit)
	if err = w.Add(); err != nil {
		t.Fatalf("cannot add event after time limit: %s", err)
	}

	prev := w.Newest()
	time.Sleep(timeLimit)
	err = w.Add()
	if err != nil {
		t.Fatalf("cannot add event")
	}
	if w.Newest().Before(prev) {
		t.Fatal("newest is before previous value")
	}
	if w.Oldest().Before(prev) {
		t.Fatal("oldest is before previous value")
	}
}
@ -1244,25 +1244,3 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) {
	bbr, ok := syncer.bad.Has(blk)
	return bbr.String(), ok
}

func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) {
	cur := ts
	for i := 0; i < 20; i++ {
		cbe := cur.Blocks()[0].BeaconEntries
		if len(cbe) > 0 {
			return &cbe[len(cbe)-1], nil
		}

		if cur.Height() == 0 {
			return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry")
		}

		next, err := syncer.store.LoadTipSet(ctx, cur.Parents())
		if err != nil {
			return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err)
		}
		cur = next
	}

	return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
@ -8,6 +8,7 @@ import (

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/mock"
	"github.com/stretchr/testify/require"
)

func init() {
@ -240,3 +241,34 @@ func TestSyncManager(t *testing.T) {
		op3.done()
	})
}

func TestSyncManagerBucketSet(t *testing.T) {
	ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
	ts2 := mock.TipSet(mock.MkBlock(ts1, 1, 0))
	bucket1 := newSyncTargetBucket(ts1, ts2)
	bucketSet := syncBucketSet{buckets: []*syncTargetBucket{bucket1}}

	// inserting a tipset (potential sync target) from an existing chain should add it to an existing bucket
	//stm: @CHAIN_SYNCER_ADD_SYNC_TARGET_001
	ts3 := mock.TipSet(mock.MkBlock(ts2, 2, 0))
	bucketSet.Insert(ts3)
	require.Equal(t, 1, len(bucketSet.buckets))
	require.Equal(t, 3, len(bucketSet.buckets[0].tips))

	// inserting a tipset from a new chain should create a new bucket
	ts4fork := mock.TipSet(mock.MkBlock(nil, 1, 1))
	bucketSet.Insert(ts4fork)
	require.Equal(t, 2, len(bucketSet.buckets))
	require.Equal(t, 3, len(bucketSet.buckets[0].tips))
	require.Equal(t, 1, len(bucketSet.buckets[1].tips))

	// Pop removes the best bucket (best sync target), e.g. bucket1
	//stm: @CHAIN_SYNCER_SELECT_SYNC_TARGET_001
	popped := bucketSet.Pop()
	require.Equal(t, popped, bucket1)
	require.Equal(t, 1, len(bucketSet.buckets))

	// PopRelated removes the bucket containing the given tipset, leaving the set empty
	bucketSet.PopRelated(ts4fork)
	require.Equal(t, 0, len(bucketSet.buckets))
}
@ -1098,3 +1098,158 @@ func TestInvalidHeight(t *testing.T) {

	tu.mineOnBlock(base, 0, nil, false, true, nil, -1, true)
}

// TestIncomingBlocks mines new blocks and checks if the incoming channel streams new block headers properly
func TestIncomingBlocks(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	clientNode := tu.nds[client]
	//stm: @CHAIN_SYNCER_INCOMING_BLOCKS_001
	incoming, err := clientNode.SyncIncomingBlocks(tu.ctx)
	require.NoError(tu.t, err)

	tu.connect(client, 0)
	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	timeout := time.After(10 * time.Second)

	for i := 0; i < 5; i++ {
		tu.mineNewBlock(0, nil)
		tu.waitUntilSync(0, client)
		tu.compareSourceState(client)

		// just in case, so we don't get deadlocked
		select {
		case <-incoming:
		case <-timeout:
			tu.t.Fatal("TestIncomingBlocks timeout")
		}
	}
}

// TestSyncManualBadTS tests manually marking and unmarking blocks in the bad TS cache
func TestSyncManualBadTS(t *testing.T) {
	// Test setup:
	// - source node is fully synced,
	// - client node is unsynced
	// - client manually marked source's head and its parent as bad
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())

	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	clientHead, err := tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync in test setup")

	//stm: @CHAIN_SYNCER_MARK_BAD_001
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	sourceHeadParent := sourceHead.Parents().Cids()[0]
	err = tu.nds[client].SyncMarkBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)

	//stm: @CHAIN_SYNCER_CHECK_BAD_001
	reason, err := tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.NotEqual(tu.t, "", reason, "block is not bad after manually marking")

	// Assertion 1:
	// - client shouldn't be synced after timeout, because the source TS is marked bad.
	// - bad block is the first block that should be synced, 1 second should be enough
	tu.connect(1, 0)
	timeout := time.After(1 * time.Second)
	<-timeout

	clientHead, err = tu.nds[client].ChainHead(tu.ctx)
	require.NoError(tu.t, err)
	require.True(tu.t, !sourceHead.Equals(clientHead), "source and client should be out of sync if source head is bad")

	// Assertion 2:
	// - after unmarking blocks as bad and reconnecting, source & client should be in sync
	//stm: @CHAIN_SYNCER_UNMARK_BAD_001
	err = tu.nds[client].SyncUnmarkBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHead.Cids()[0])
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	err = tu.nds[client].SyncUnmarkAllBad(tu.ctx)
	require.NoError(tu.t, err)

	reason, err = tu.nds[client].SyncCheckBad(tu.ctx, sourceHeadParent)
	require.NoError(tu.t, err)
	require.Equal(tu.t, "", reason, "block is still bad after manually unmarking")

	tu.disconnect(1, 0)
	tu.connect(1, 0)

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)
}

// TestSyncState tests fetching the sync worker state before, during & after the sync
func TestSyncState(t *testing.T) {
	H := 50
	tu := prepSyncTest(t, H)

	client := tu.addClientNode()
	require.NoError(t, tu.mn.LinkAll())
	clientNode := tu.nds[client]
	sourceHead, err := tu.nds[source].ChainHead(tu.ctx)
	require.NoError(tu.t, err)

	// sync state should be empty before the sync
	state, err := clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 0)

	tu.connect(client, 0)

	// wait until sync starts, or at most `timeout` seconds
	timeout := time.After(5 * time.Second)
	activeSyncs := []api.ActiveSync{}

	for len(activeSyncs) == 0 {
		//stm: @CHAIN_SYNCER_STATE_001
		state, err = clientNode.SyncState(tu.ctx)
		require.NoError(tu.t, err)
		activeSyncs = state.ActiveSyncs

		sleep := time.After(100 * time.Millisecond)
		select {
		case <-sleep:
		case <-timeout:
			tu.t.Fatal("TestSyncState timeout")
		}
	}

	// check state during sync
	require.Equal(tu.t, len(activeSyncs), 1)
	require.True(tu.t, activeSyncs[0].Target.Equals(sourceHead))

	tu.waitUntilSync(0, client)
	tu.compareSourceState(client)

	// check state after sync
	state, err = clientNode.SyncState(tu.ctx)
	require.NoError(tu.t, err)
	require.Equal(tu.t, len(state.ActiveSyncs), 1)
	require.Equal(tu.t, state.ActiveSyncs[0].Stage, api.StageSyncComplete)
}
@ -3,6 +3,7 @@ package mock
import (
	"context"
	"fmt"
	"math/rand"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
@ -24,15 +25,7 @@ func Address(i uint64) address.Address {
}

func MkMessage(from, to address.Address, nonce uint64, w *wallet.LocalWallet) *types.SignedMessage {
	msg := &types.Message{
		To:         to,
		From:       from,
		Value:      types.NewInt(1),
		Nonce:      nonce,
		GasLimit:   1000000,
		GasFeeCap:  types.NewInt(100),
		GasPremium: types.NewInt(1),
	}
	msg := UnsignedMessage(from, to, nonce)

	sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
	if err != nil {
@ -96,3 +89,35 @@ func TipSet(blks ...*types.BlockHeader) *types.TipSet {
	}
	return ts
}

// RandomActorAddresses generates count new addresses using the provided seed, and returns them
func RandomActorAddresses(seed int64, count int) ([]*address.Address, error) {
	randAddrs := make([]*address.Address, count)
	source := rand.New(rand.NewSource(seed))
	for i := 0; i < count; i++ {
		bytes := make([]byte, 32)
		_, err := source.Read(bytes)
		if err != nil {
			return nil, err
		}

		addr, err := address.NewActorAddress(bytes)
		if err != nil {
			return nil, err
		}
		randAddrs[i] = &addr
	}
	return randAddrs, nil
}
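RandomActorAddresses is seeded, so repeated runs yield the same addresses, which keeps tests deterministic. A usage sketch (illustrative only, assumed to sit inside some test with t in scope):

// Sketch: three deterministic test addresses from seed 42.
addrs, err := mock.RandomActorAddresses(42, 3)
if err != nil {
	t.Fatal(err)
}
for _, a := range addrs {
	t.Log(a.String())
}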

func UnsignedMessage(from, to address.Address, nonce uint64) *types.Message {
	return &types.Message{
		To:         to,
		From:       from,
		Value:      types.NewInt(1),
		Nonce:      nonce,
		GasLimit:   1000000,
		GasFeeCap:  types.NewInt(100),
		GasPremium: types.NewInt(1),
	}
}
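With this refactor, MkMessage is just UnsignedMessage plus a wallet signature, so callers can pick whichever form they need. A caller-side sketch (illustrative only; w is assumed to be a *wallet.LocalWallet holding the sender's key):

from := mock.Address(1000)
to := mock.Address(1001)

unsigned := mock.UnsignedMessage(from, to, 0) // bare *types.Message with nonce 0
signed := mock.MkMessage(from, to, 0, w)      // the same message, wrapped with a signature

_, _ = unsigned, signed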
chain/wallet/multi_test.go (new file)
@ -0,0 +1,73 @@
//stm: #unit
package wallet

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

func TestMultiWallet(t *testing.T) {

	ctx := context.Background()

	local, err := NewWallet(NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	var wallet api.Wallet = MultiWallet{
		Local: local,
	}

	//stm: @TOKEN_WALLET_MULTI_NEW_ADDRESS_001
	a1, err := wallet.WalletNew(ctx, types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_MULTI_HAS_001
	exists, err := wallet.WalletHas(ctx, a1)
	if err != nil {
		t.Fatal(err)
	}

	if !exists {
		t.Fatalf("address doesn't exist in wallet")
	}

	//stm: @TOKEN_WALLET_MULTI_LIST_001
	addrs, err := wallet.WalletList(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// only the newly created address should be listed
	if len(addrs) != 1 {
		t.Fatalf("wrong number of addresses in wallet")
	}

	//stm: @TOKEN_WALLET_MULTI_EXPORT_001
	keyInfo, err := wallet.WalletExport(ctx, a1)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_MULTI_IMPORT_001
	addr, err := wallet.WalletImport(ctx, keyInfo)
	if err != nil {
		t.Fatal(err)
	}

	if addr != a1 {
		t.Fatalf("imported address doesn't match exported address")
	}

	//stm: @TOKEN_WALLET_DELETE_001
	err = wallet.WalletDelete(ctx, a1)
	if err != nil {
		t.Fatal(err)
	}
}
chain/wallet/wallet_test.go (new file)
@ -0,0 +1,105 @@
//stm: #unit
package wallet

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/stretchr/testify/assert"
)

func TestWallet(t *testing.T) {

	ctx := context.Background()

	w1, err := NewWallet(NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_NEW_001
	a1, err := w1.WalletNew(ctx, types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_HAS_001
	exists, err := w1.WalletHas(ctx, a1)
	if err != nil {
		t.Fatal(err)
	}

	if !exists {
		t.Fatalf("address doesn't exist in wallet")
	}

	w2, err := NewWallet(NewMemKeyStore())
	if err != nil {
		t.Fatal(err)
	}

	a2, err := w2.WalletNew(ctx, types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	a3, err := w2.WalletNew(ctx, types.KTSecp256k1)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_LIST_001
	addrs, err := w2.WalletList(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if len(addrs) != 2 {
		t.Fatalf("wrong number of addresses in wallet")
	}

	//stm: @TOKEN_WALLET_DELETE_001
	err = w2.WalletDelete(ctx, a2)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_HAS_001
	exists, err = w2.WalletHas(ctx, a2)
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Fatalf("failed to delete wallet address")
	}

	//stm: @TOKEN_WALLET_SET_DEFAULT_001
	err = w2.SetDefault(a3)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_DEFAULT_ADDRESS_001
	def, err := w2.GetDefault()
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, a3, def)

	//stm: @TOKEN_WALLET_EXPORT_001
	keyInfo, err := w2.WalletExport(ctx, a3)
	if err != nil {
		t.Fatal(err)
	}

	//stm: @TOKEN_WALLET_IMPORT_001
	addr, err := w2.WalletImport(ctx, keyInfo)
	if err != nil {
		t.Fatal(err)
	}

	if addr != a3 {
		t.Fatalf("imported address doesn't match exported address")
	}
}
cli/chain.go
@ -7,6 +7,7 @@ import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
@ -67,6 +68,8 @@ var ChainHeadCmd = &cli.Command{
|
||||
Name: "head",
|
||||
Usage: "Print chain head",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -80,7 +83,7 @@ var ChainHeadCmd = &cli.Command{
|
||||
}
|
||||
|
||||
for _, c := range head.Cids() {
|
||||
fmt.Println(c)
|
||||
afmt.Println(c)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@ -97,6 +100,8 @@ var ChainGetBlock = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -124,7 +129,7 @@ var ChainGetBlock = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(string(out))
|
||||
afmt.Println(string(out))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -163,9 +168,8 @@ var ChainGetBlock = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(string(out))
|
||||
afmt.Println(string(out))
|
||||
return nil
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
@ -182,6 +186,8 @@ var ChainReadObjCmd = &cli.Command{
|
||||
Usage: "Read the raw bytes of an object",
|
||||
ArgsUsage: "[objectCid]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -199,7 +205,7 @@ var ChainReadObjCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%x\n", obj)
|
||||
afmt.Printf("%x\n", obj)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -215,6 +221,8 @@ var ChainDeleteObjCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -236,7 +244,7 @@ var ChainDeleteObjCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Obj %s deleted\n", c.String())
|
||||
afmt.Printf("Obj %s deleted\n", c.String())
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -257,6 +265,7 @@ var ChainStatObjCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -282,8 +291,8 @@ var ChainStatObjCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Links: %d\n", stats.Links)
|
||||
fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
|
||||
afmt.Printf("Links: %d\n", stats.Links)
|
||||
afmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -293,6 +302,8 @@ var ChainGetMsgCmd = &cli.Command{
|
||||
Usage: "Get and print a message by its cid",
|
||||
ArgsUsage: "[messageCid]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass a cid of a message to get")
|
||||
}
|
||||
@ -331,7 +342,7 @@ var ChainGetMsgCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(string(enc))
|
||||
afmt.Println(string(enc))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -406,6 +417,7 @@ var ChainInspectUsage = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -507,23 +519,23 @@ var ChainInspectUsage = &cli.Command{
|
||||
|
||||
numRes := cctx.Int("num-results")
|
||||
|
||||
fmt.Printf("Total Gas Limit: %d\n", sum)
|
||||
fmt.Printf("By Sender:\n")
|
||||
afmt.Printf("Total Gas Limit: %d\n", sum)
|
||||
afmt.Printf("By Sender:\n")
|
||||
for i := 0; i < numRes && i < len(senderVals); i++ {
|
||||
sv := senderVals[i]
|
||||
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
|
||||
afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Printf("By Receiver:\n")
|
||||
afmt.Println()
|
||||
afmt.Printf("By Receiver:\n")
|
||||
for i := 0; i < numRes && i < len(destVals); i++ {
|
||||
sv := destVals[i]
|
||||
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
|
||||
afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Printf("By Method:\n")
|
||||
afmt.Println()
|
||||
afmt.Printf("By Method:\n")
|
||||
for i := 0; i < numRes && i < len(methodVals); i++ {
|
||||
sv := methodVals[i]
|
||||
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
|
||||
afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -548,6 +560,7 @@ var ChainListCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -595,7 +608,7 @@ var ChainListCmd = &cli.Command{
|
||||
tss = otss
|
||||
for i, ts := range tss {
|
||||
pbf := ts.Blocks()[0].ParentBaseFee
|
||||
fmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))
|
||||
afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))
|
||||
|
||||
for _, b := range ts.Blocks() {
|
||||
msgs, err := api.ChainGetBlockMessages(ctx, b.Cid())
|
||||
@ -621,7 +634,7 @@ var ChainListCmd = &cli.Command{
|
||||
avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs)))
|
||||
}
|
||||
|
||||
fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
|
||||
afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
|
||||
}
|
||||
if i < len(tss)-1 {
|
||||
msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid())
|
||||
@ -646,13 +659,13 @@ var ChainListCmd = &cli.Command{
|
||||
gasEfficiency := 100 * float64(gasUsed) / float64(limitSum)
|
||||
gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit)
|
||||
|
||||
fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
|
||||
afmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
|
||||
}
|
||||
fmt.Println()
|
||||
afmt.Println()
|
||||
}
|
||||
} else {
|
||||
for i := len(tss) - 1; i >= 0; i-- {
|
||||
printTipSet(cctx.String("format"), tss[i])
|
||||
printTipSet(cctx.String("format"), tss[i], afmt)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -707,6 +720,8 @@ var ChainGetCmd = &cli.Command{
|
||||
- account-state
|
||||
`,
|
||||
Action: func(cctx *cli.Context) error {
|
||||
afmt := NewAppFmt(cctx.App)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -725,7 +740,7 @@ var ChainGetCmd = &cli.Command{
|
||||
|
||||
p = "/ipfs/" + ts.ParentState().String() + p
|
||||
if cctx.Bool("verbose") {
|
||||
fmt.Println(p)
|
||||
afmt.Println(p)
|
||||
}
|
||||
}
|
||||
|
||||
@ -740,7 +755,7 @@ var ChainGetCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
afmt.Println(string(b))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -782,7 +797,7 @@ var ChainGetCmd = &cli.Command{
|
||||
}
|
||||
|
||||
if cbu == nil {
|
||||
fmt.Printf("%x", raw)
|
||||
afmt.Printf("%x", raw)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -794,7 +809,7 @@ var ChainGetCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(b))
|
||||
afmt.Println(string(b))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -878,7 +893,7 @@ func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error
|
||||
})
|
||||
}
|
||||
|
||||
func printTipSet(format string, ts *types.TipSet) {
|
||||
func printTipSet(format string, ts *types.TipSet, afmt *AppFmt) {
|
||||
format = strings.ReplaceAll(format, "<height>", fmt.Sprint(ts.Height()))
|
||||
format = strings.ReplaceAll(format, "<time>", time.Unix(int64(ts.MinTimestamp()), 0).Format(time.Stamp))
|
||||
blks := "[ "
|
||||
@ -897,7 +912,7 @@ func printTipSet(format string, ts *types.TipSet) {
|
||||
format = strings.ReplaceAll(format, "<blocks>", blks)
|
||||
format = strings.ReplaceAll(format, "<weight>", fmt.Sprint(ts.Blocks()[0].ParentWeight))
|
||||
|
||||
fmt.Println(format)
|
||||
afmt.Println(format)
|
||||
}
|
||||
|
||||
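For context, printTipSet substitutes the <height>, <time>, <blocks> and <weight> tokens in a caller-supplied template and now prints through the injected formatter, which is what lets the tests added below capture its output. A minimal sketch of a call site (the template string here is illustrative, not the command's actual default):

// Hypothetical call site: render one tipset per line through the app formatter.
afmt := NewAppFmt(cctx.App)
printTipSet("<height>: (<time>) <blocks> [weight: <weight>]", ts, afmt)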
var ChainBisectCmd = &cli.Command{
@ -918,6 +933,8 @@ var ChainBisectCmd = &cli.Command{
For special path elements see 'chain get' help
`,
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -961,7 +978,7 @@ var ChainBisectCmd = &cli.Command{
}

path := "/ipld/" + midTs.ParentState().String() + "/" + subPath
fmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path)
afmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path)

nd, err := api.ChainGetNode(ctx, path)
if err != nil {
@ -988,32 +1005,32 @@ var ChainBisectCmd = &cli.Command{
if strings.TrimSpace(out.String()) != "false" {
end = mid
highest = midTs
fmt.Println("true")
afmt.Println("true")
} else {
start = mid
fmt.Printf("false (cli)\n")
afmt.Printf("false (cli)\n")
}
case *exec.ExitError:
if len(serr.String()) > 0 {
fmt.Println("error")
afmt.Println("error")

fmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " "))
fmt.Println(string(b))
fmt.Println("<----")
afmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " "))
afmt.Println(string(b))
afmt.Println("<----")
return xerrors.Errorf("error running bisect check: %s", serr.String())
}

start = mid
fmt.Println("false")
afmt.Println("false")
default:
return err
}

if start == end {
if strings.TrimSpace(out.String()) == "true" {
fmt.Println(midTs.Height())
afmt.Println(midTs.Height())
} else {
fmt.Println(prev)
afmt.Println(prev)
}
return nil
}
@ -1058,7 +1075,7 @@ var ChainExportCmd = &cli.Command{
return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality)
}

fi, err := os.Create(cctx.Args().First())
fi, err := createExportFile(cctx.App, cctx.Args().First())
if err != nil {
return err
}
@ -1118,6 +1135,8 @@ var SlashConsensusFault = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

srv, err := GetFullNodeServices(cctx)
if err != nil {
return err
@ -1222,7 +1241,7 @@ var SlashConsensusFault = &cli.Command{
return err
}

fmt.Println(smsg.Cid())
afmt.Println(smsg.Cid())

return nil
},
@ -1232,6 +1251,8 @@ var ChainGasPriceCmd = &cli.Command{
Name: "gas-price",
Usage: "Estimate gas prices",
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -1248,7 +1269,7 @@ var ChainGasPriceCmd = &cli.Command{
return err
}

fmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est))
afmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est))
}

return nil
@ -1278,6 +1299,8 @@ var chainDecodeParamsCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -1329,7 +1352,7 @@ var chainDecodeParamsCmd = &cli.Command{
return err
}

fmt.Println(pstr)
afmt.Println(pstr)

return nil
},
@ -1362,6 +1385,8 @@ var chainEncodeParamsCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

if cctx.Args().Len() != 3 {
return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
}
@ -1410,9 +1435,9 @@ var chainEncodeParamsCmd = &cli.Command{

switch cctx.String("encoding") {
case "base64", "b64":
fmt.Println(base64.StdEncoding.EncodeToString(p))
afmt.Println(base64.StdEncoding.EncodeToString(p))
case "hex":
fmt.Println(hex.EncodeToString(p))
afmt.Println(hex.EncodeToString(p))
default:
return xerrors.Errorf("unknown encoding")
}
@ -1420,3 +1445,16 @@ var chainEncodeParamsCmd = &cli.Command{
return nil
},
}
// createExportFile returns the export file handle from the app metadata, or creates a new file if it doesn't exist
func createExportFile(app *cli.App, path string) (io.WriteCloser, error) {
if wc, ok := app.Metadata["export-file"]; ok {
return wc.(io.WriteCloser), nil
}

fi, err := os.Create(path)
if err != nil {
return nil, err
}
return fi, nil
}
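Because the file handle is resolved through app.Metadata first, a test can route the export stream into memory instead of the filesystem before invoking the command; the chain_test.go added below does exactly this with its mockExportFile type. A minimal sketch of the injection:

// Sketch: plant an in-memory io.WriteCloser under the "export-file" key.
buf := new(bytes.Buffer)
app.Metadata["export-file"] = mockExportFile{buf} // mockExportFile is defined in cli/chain_test.go below
err := app.Run([]string{"chain", "export", "ignored.car"}) // the path argument is then never opened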
557
cli/chain_test.go
Normal file
@ -0,0 +1,557 @@
//stm: #cli
package cli

import (
"bytes"
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"testing"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
types "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/golang/mock/gomock"
cid "github.com/ipfs/go-cid"
"github.com/stretchr/testify/assert"
)
func TestChainHead(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainHeadCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

ts := mock.TipSet(mock.MkBlock(nil, 0, 0))
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
)

//stm: @CLI_CHAIN_HEAD_001
err := app.Run([]string{"chain", "head"})
assert.NoError(t, err)

assert.Regexp(t, regexp.MustCompile(ts.Cids()[0].String()), buf.String())
}

func TestGetBlock(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetBlock))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

block := mock.MkBlock(nil, 0, 0)
blockMsgs := api.BlockMessages{}

gomock.InOrder(
mockApi.EXPECT().ChainGetBlock(ctx, block.Cid()).Return(block, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, block.Cid()).Return(&blockMsgs, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, block.Cid()).Return([]api.Message{}, nil),
mockApi.EXPECT().ChainGetParentReceipts(ctx, block.Cid()).Return([]*types.MessageReceipt{}, nil),
)

//stm: @CLI_CHAIN_GET_BLOCK_001
err := app.Run([]string{"chain", "getblock", block.Cid().String()})
assert.NoError(t, err)

// expected output format
out := struct {
types.BlockHeader
BlsMessages []*types.Message
SecpkMessages []*types.SignedMessage
ParentReceipts []*types.MessageReceipt
ParentMessages []cid.Cid
}{}

err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)

assert.True(t, block.Cid().Equals(out.Cid()))
}
func TestReadObj(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainReadObjCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

block := mock.MkBlock(nil, 0, 0)
obj := new(bytes.Buffer)
err := block.MarshalCBOR(obj)
assert.NoError(t, err)

gomock.InOrder(
mockApi.EXPECT().ChainReadObj(ctx, block.Cid()).Return(obj.Bytes(), nil),
)

//stm: @CLI_CHAIN_READ_OBJECT_001
err = app.Run([]string{"chain", "read-obj", block.Cid().String()})
assert.NoError(t, err)

assert.Equal(t, buf.String(), fmt.Sprintf("%x\n", obj.Bytes()))
}

func TestChainDeleteObj(t *testing.T) {
cmd := WithCategory("chain", ChainDeleteObjCmd)
block := mock.MkBlock(nil, 0, 0)

// given no force flag, it should return an error and no API calls should be made
t.Run("no-really-do-it", func(t *testing.T) {
app, _, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()

//stm: @CLI_CHAIN_DELETE_OBJECT_002
err := app.Run([]string{"chain", "delete-obj", block.Cid().String()})
assert.Error(t, err)
})

// given a force flag, it calls API delete
t.Run("really-do-it", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainDeleteObj(ctx, block.Cid()).Return(nil),
)

//stm: @CLI_CHAIN_DELETE_OBJECT_001
err := app.Run([]string{"chain", "delete-obj", "--really-do-it=true", block.Cid().String()})
assert.NoError(t, err)

assert.Contains(t, buf.String(), block.Cid().String())
})
}
func TestChainStatObj(t *testing.T) {
cmd := WithCategory("chain", ChainStatObjCmd)
block := mock.MkBlock(nil, 0, 0)
stat := api.ObjStat{Size: 123, Links: 321}

checkOutput := func(buf *bytes.Buffer) {
out := buf.String()
outSplit := strings.Split(out, "\n")

assert.Contains(t, outSplit[0], fmt.Sprintf("%d", stat.Links))
assert.Contains(t, outSplit[1], fmt.Sprintf("%d", stat.Size))
}

// given no --base flag, it calls ChainStatObj with base=cid.Undef
t.Run("no-base", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), cid.Undef).Return(stat, nil),
)

//stm: @CLI_CHAIN_STAT_OBJECT_001
err := app.Run([]string{"chain", "stat-obj", block.Cid().String()})
assert.NoError(t, err)

checkOutput(buf)
})

// given a --base flag, it calls ChainStatObj with that base
t.Run("base", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), block.Cid()).Return(stat, nil),
)

//stm: @CLI_CHAIN_STAT_OBJECT_002
err := app.Run([]string{"chain", "stat-obj", fmt.Sprintf("-base=%s", block.Cid().String()), block.Cid().String()})
assert.NoError(t, err)

checkOutput(buf)
})
}
func TestChainGetMsg(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetMsgCmd))
defer done()

addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)

from := addrs[0]
to := addrs[1]

msg := mock.UnsignedMessage(*from, *to, 0)

obj := new(bytes.Buffer)
err = msg.MarshalCBOR(obj)
assert.NoError(t, err)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainReadObj(ctx, msg.Cid()).Return(obj.Bytes(), nil),
)

//stm: @CLI_CHAIN_GET_MESSAGE_001
err = app.Run([]string{"chain", "getmessage", msg.Cid().String()})
assert.NoError(t, err)

var out types.Message
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)

assert.Equal(t, *msg, out)
}
func TestSetHead(t *testing.T) {
cmd := WithCategory("chain", ChainSetHeadCmd)
genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
ts := mock.TipSet(mock.MkBlock(genesis, 1, 0))
epoch := abi.ChainEpoch(uint64(0))

// given the -genesis flag, resets head to genesis ignoring the provided ts positional argument
t.Run("genesis", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetGenesis(ctx).Return(genesis, nil),
mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
)

//stm: @CLI_CHAIN_SET_HEAD_003
err := app.Run([]string{"chain", "sethead", "-genesis=true", ts.Key().String()})
assert.NoError(t, err)
})

// given the -epoch flag, resets head to given epoch, ignoring the provided ts positional argument
t.Run("epoch", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(genesis, nil),
mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
)

//stm: @CLI_CHAIN_SET_HEAD_002
err := app.Run([]string{"chain", "sethead", fmt.Sprintf("-epoch=%s", epoch), ts.Key().String()})
assert.NoError(t, err)
})

// given no flag, resets the head to given tipset key
t.Run("default", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetBlock(ctx, ts.Key().Cids()[0]).Return(ts.Blocks()[0], nil),
mockApi.EXPECT().ChainSetHead(ctx, ts.Key()).Return(nil),
)

//stm: @CLI_CHAIN_SET_HEAD_001
err := app.Run([]string{"chain", "sethead", ts.Key().Cids()[0].String()})
assert.NoError(t, err)
})
}
func TestInspectUsage(t *testing.T) {
cmd := WithCategory("chain", ChainInspectUsage)
ts := mock.TipSet(mock.MkBlock(nil, 0, 0))

addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)

from := addrs[0]
to := addrs[1]

msg := mock.UnsignedMessage(*from, *to, 0)
msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}

actor := &types.Actor{
Code: builtin.StorageMarketActorCodeID,
Nonce: 0,
Balance: big.NewInt(1000000000),
}

t.Run("default", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, ts.Blocks()[0].Cid()).Return(msgs, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, ts.Parents()).Return(nil, nil),
mockApi.EXPECT().StateGetActor(ctx, *to, ts.Key()).Return(actor, nil),
)

//stm: @CLI_CHAIN_INSPECT_USAGE_001
err := app.Run([]string{"chain", "inspect-usage"})
assert.NoError(t, err)

out := buf.String()

// the output is plain text, so we match on substrings
assert.Contains(t, out, from.String())
assert.Contains(t, out, to.String())
// check for gas by sender
assert.Contains(t, out, "By Sender")
// check for gas by method
assert.Contains(t, out, "By Method:\nSend")
})
}
func TestChainList(t *testing.T) {
cmd := WithCategory("chain", ChainListCmd)
genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
blk := mock.MkBlock(genesis, 0, 0)
blk.Height = 1
head := mock.TipSet(blk)

addrs, err := mock.RandomActorAddresses(12345, 2)
assert.NoError(t, err)

from := addrs[0]
to := addrs[1]

msg := mock.UnsignedMessage(*from, *to, 0)
msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}
blockMsgs := &api.BlockMessages{}
receipts := []*types.MessageReceipt{}

t.Run("default", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// the same methods are mocked multiple times because they are called in a loop over all tipsets (2 in this case)
gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(genesis, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, genesis.Blocks()[0].Cid()).Return(blockMsgs, nil),
mockApi.EXPECT().ChainGetParentMessages(ctx, head.Blocks()[0].Cid()).Return(msgs, nil),
mockApi.EXPECT().ChainGetParentReceipts(ctx, head.Blocks()[0].Cid()).Return(receipts, nil),
mockApi.EXPECT().ChainGetBlockMessages(ctx, head.Blocks()[0].Cid()).Return(blockMsgs, nil),
)

//stm: @CLI_CHAIN_LIST_001
err := app.Run([]string{"chain", "love", "--gas-stats=true"}) // chain is love ❤️
assert.NoError(t, err)

out := buf.String()

// should print out 2 blocks, indexed with 0: and 1:
assert.Contains(t, out, "0:")
assert.Contains(t, out, "1:")
})
}
func TestChainGet(t *testing.T) {
blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)
cmd := WithCategory("chain", ChainGetCmd)

// given no -as-type flag & ipfs prefix, should print object as JSON if it's marshalable
t.Run("ipfs", func(t *testing.T) {
path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())

app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)

//stm: @CLI_CHAIN_GET_001
err := app.Run([]string{"chain", "get", path})
assert.NoError(t, err)

var out types.BlockHeader
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.Equal(t, *blk, out)
})

// given no -as-type flag & pstate prefix, should traverse from head.ParentStateRoot and print JSON if it's marshalable
t.Run("pstate", func(t *testing.T) {
p1 := "/pstate"
p2 := fmt.Sprintf("/ipfs/%s", ts.ParentState().String())

app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainGetNode(ctx, p2).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)

//stm: @CLI_CHAIN_GET_002
err := app.Run([]string{"chain", "get", p1})
assert.NoError(t, err)

var out types.BlockHeader
err = json.Unmarshal(buf.Bytes(), &out)
assert.NoError(t, err)
assert.Equal(t, *blk, out)
})

// given an unknown -as-type value, return an error
t.Run("unknown-type", func(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
defer done()

path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
)

//stm: @CLI_CHAIN_GET_004
err := app.Run([]string{"chain", "get", "-as-type=foo", path})
assert.Error(t, err)
})
}
func TestChainBisect(t *testing.T) {
blk1 := mock.MkBlock(nil, 0, 0)
blk1.Height = 0
ts1 := mock.TipSet(blk1)

blk2 := mock.MkBlock(ts1, 0, 0)
blk2.Height = 1
ts2 := mock.TipSet(blk2)

subpath := "whatever/its/mocked"
minHeight := uint64(0)
maxHeight := uint64(1)
shell := "echo"

path := fmt.Sprintf("/ipld/%s/%s", ts2.ParentState(), subpath)

cmd := WithCategory("chain", ChainBisectCmd)

app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

gomock.InOrder(
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), types.EmptyTSK).Return(ts2, nil),
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), ts2.Key()).Return(ts2, nil),
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk2.Cid(), Obj: blk2}, nil),
)

//stm: @CLI_CHAIN_BISECT_001
err := app.Run([]string{"chain", "bisect", fmt.Sprintf("%d", minHeight), fmt.Sprintf("%d", maxHeight), subpath, shell})
assert.NoError(t, err)

out := buf.String()
assert.Contains(t, out, path)
}
func TestChainExport(t *testing.T) {
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainExportCmd))
defer done()

// export writes to a file; it is mocked here so the test has no side effects
mockFile := mockExportFile{new(bytes.Buffer)}
app.Metadata["export-file"] = mockFile

blk := mock.MkBlock(nil, 0, 0)
ts := mock.TipSet(blk)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

export := make(chan []byte, 2)
expBytes := []byte("whatever")
export <- expBytes
export <- []byte{} // empty slice means export is complete
close(export)

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
mockApi.EXPECT().ChainExport(ctx, abi.ChainEpoch(0), false, ts.Key()).Return(export, nil),
)

//stm: @CLI_CHAIN_EXPORT_001
err := app.Run([]string{"chain", "export", "whatever.car"})
assert.NoError(t, err)

assert.Equal(t, expBytes, mockFile.Bytes())
}
func TestChainGasPrice(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGasPriceCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// the implementation calls gas estimation for several different block counts,
// so we mock it, count how many times it's called, and expect that many lines of output
calls := 0
mockApi.
EXPECT().
GasEstimateGasPremium(ctx, gomock.Any(), builtin.SystemActorAddr, int64(10000), types.EmptyTSK).
Return(big.NewInt(0), nil).
AnyTimes().
Do(func(a, b, c, d, e interface{}) { // looks funny, but we don't care about the args here, just counting
calls++
})

//stm: @CLI_CHAIN_GAS_PRICE_001
err := app.Run([]string{"chain", "gas-price"})
assert.NoError(t, err)

lines := strings.Split(strings.Trim(buf.String(), "\n"), "\n")
assert.Equal(t, calls, len(lines))
}
type mockExportFile struct {
*bytes.Buffer
}

func (mef mockExportFile) Close() error {
return nil
}
@ -358,7 +358,13 @@ The minimum value is 518400 (6 months).`,
&CidBaseFlag,
},
Action: func(cctx *cli.Context) error {

expectedArgsMsg := "expected 4 args: dataCid, miner, price, duration"

if !cctx.Args().Present() {
if cctx.Bool("manual-stateless-deal") {
return xerrors.New("--manual-stateless-deal can not be combined with interactive deal mode: you must specify the " + expectedArgsMsg)
}
return interactiveDeal(cctx)
}

@ -371,7 +377,7 @@ The minimum value is 518400 (6 months).`,
afmt := NewAppFmt(cctx.App)

if cctx.NArg() != 4 {
return xerrors.New("expected 4 args: dataCid, miner, price, duration")
return xerrors.New(expectedArgsMsg)
}

// [data, miner, price, dur]
@ -667,6 +673,8 @@ uiLoop:

state = "miner"
case "miner":
maddrs = maddrs[:0]
ask = ask[:0]
afmt.Print("Miner Addresses (f0.. f0..), none to find: ")

_maddrsStr, _, err := rl.ReadLine()
@ -802,7 +810,8 @@ uiLoop:

dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64)
if err != nil {
return err
printErr(xerrors.Errorf("reading deal count: invalid number"))
continue
}

color.Blue(".. Picking miners")
@ -859,12 +868,13 @@ uiLoop:

a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr)
if err != nil {
printErr(xerrors.Errorf("failed to query ask: %w", err))
printErr(xerrors.Errorf("failed to query ask for miner %s: %w", maddr.String(), err))
state = "miner"
continue uiLoop
}

ask = append(ask, *a)

}

// TODO: run more validation
112
cli/filplus.go
@ -1,7 +1,9 @@
package cli

import (
"bytes"
"context"
"encoding/hex"
"fmt"

verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
@ -34,6 +36,7 @@ var filplusCmd = &cli.Command{
filplusListClientsCmd,
filplusCheckClientCmd,
filplusCheckNotaryCmd,
filplusSignRemoveDataCapProposal,
},
}

@ -274,3 +277,112 @@ func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address)

return st.VerifierDataCap(vid)
}

var filplusSignRemoveDataCapProposal = &cli.Command{
Name: "sign-remove-data-cap-proposal",
Usage: "allows a notary to sign a Remove Data Cap Proposal",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "id",
Usage: "specify the RemoveDataCapProposal ID (will look up on chain if unspecified)",
Required: false,
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() != 3 {
return fmt.Errorf("must specify three arguments: notary address, client address, and allowance to remove")
}

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("failed to get full node api: %w", err)
}
defer closer()
ctx := ReqContext(cctx)

act, err := api.StateGetActor(ctx, verifreg.Address, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("failed to get verifreg actor: %w", err)
}

apibs := blockstore.NewAPIBlockstore(api)
store := adt.WrapStore(ctx, cbor.NewCborStore(apibs))

st, err := verifreg.Load(store, act)
if err != nil {
return xerrors.Errorf("failed to load verified registry state: %w", err)
}

verifier, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
verifierIdAddr, err := api.StateLookupID(ctx, verifier, types.EmptyTSK)
if err != nil {
return err
}

client, err := address.NewFromString(cctx.Args().Get(1))
if err != nil {
return err
}
clientIdAddr, err := api.StateLookupID(ctx, client, types.EmptyTSK)
if err != nil {
return err
}

allowanceToRemove, err := types.BigFromString(cctx.Args().Get(2))
if err != nil {
return err
}

_, dataCap, err := st.VerifiedClientDataCap(clientIdAddr)
if err != nil {
return xerrors.Errorf("failed to find verified client data cap: %w", err)
}
if dataCap.LessThanEqual(big.Zero()) {
return xerrors.Errorf("client data cap %s is less than amount requested to be removed %s", dataCap.String(), allowanceToRemove.String())
}

found, _, err := checkNotary(ctx, api, verifier)
if err != nil {
return xerrors.Errorf("failed to check notary status: %w", err)
}

if !found {
return xerrors.New("verifier address must be a notary")
}

id := cctx.Uint64("id")
if id == 0 {
_, id, err = st.RemoveDataCapProposalID(verifierIdAddr, clientIdAddr)
if err != nil {
return xerrors.Errorf("failed to find remove data cap proposal id: %w", err)
}
}

params := verifreg.RemoveDataCapProposal{
RemovalProposalID: verifreg.RmDcProposalID{ProposalID: id},
DataCapAmount: allowanceToRemove,
VerifiedClient: clientIdAddr,
}

paramBuf := new(bytes.Buffer)
paramBuf.WriteString(verifreg.SignatureDomainSeparation_RemoveDataCap)
err = params.MarshalCBOR(paramBuf)
if err != nil {
return xerrors.Errorf("failed to marshal paramBuf: %w", err)
}

sig, err := api.WalletSign(ctx, verifier, paramBuf.Bytes())
if err != nil {
return xerrors.Errorf("failed to sign message: %w", err)
}

sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)

fmt.Println(hex.EncodeToString(sigBytes))

return nil
},
}
32
cli/mocks_test.go
Normal file
@ -0,0 +1,32 @@
package cli

import (
"bytes"
"testing"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/golang/mock/gomock"
ucli "github.com/urfave/cli/v2"
)

// NewMockAppWithFullAPI returns a gomock-ed CLI app used for unit tests
// see cli/util/api.go:GetFullNodeAPI for mock API injection
func NewMockAppWithFullAPI(t *testing.T, cmd *ucli.Command) (*ucli.App, *mocks.MockFullNode, *bytes.Buffer, func()) {
app := ucli.NewApp()
app.Commands = ucli.Commands{cmd}
app.Setup()

// create and inject the mock API into app Metadata
ctrl := gomock.NewController(t)
mockFullNode := mocks.NewMockFullNode(ctrl)
var fullNode api.FullNode = mockFullNode
app.Metadata["test-full-api"] = fullNode

// this only works if the implementation writes through app.Writer;
// code that prints via fmt.* has to be refactored first
buf := &bytes.Buffer{}
app.Writer = buf

return app, mockFullNode, buf, ctrl.Finish
}
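A typical test wires a command into this harness, sets gomock expectations, runs the CLI invocation, and asserts on the captured writer; the sketch below mirrors TestChainHead from cli/chain_test.go above:

func TestExample(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainHeadCmd))
defer done() // done is ctrl.Finish, which verifies all gomock expectations were met

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

ts := mock.TipSet(mock.MkBlock(nil, 0, 0))
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil)

assert.NoError(t, app.Run([]string{"chain", "head"}))
assert.Contains(t, buf.String(), ts.Cids()[0].String())
}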
61
cli/mpool.go
@ -60,6 +60,8 @@ var MpoolPending = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -72,7 +74,7 @@ var MpoolPending = &cli.Command{
if tos := cctx.String("to"); tos != "" {
a, err := address.NewFromString(tos)
if err != nil {
return fmt.Errorf("given 'to' address %q was invalid: %w", tos, err)
return xerrors.Errorf("given 'to' address %q was invalid: %w", tos, err)
}
toa = a
}
@ -80,7 +82,7 @@ var MpoolPending = &cli.Command{
if froms := cctx.String("from"); froms != "" {
a, err := address.NewFromString(froms)
if err != nil {
return fmt.Errorf("given 'from' address %q was invalid: %w", froms, err)
return xerrors.Errorf("given 'from' address %q was invalid: %w", froms, err)
}
froma = a
}
@ -119,13 +121,13 @@ var MpoolPending = &cli.Command{
}

if cctx.Bool("cids") {
fmt.Println(msg.Cid())
afmt.Println(msg.Cid())
} else {
out, err := json.MarshalIndent(msg, "", "  ")
if err != nil {
return err
}
fmt.Println(string(out))
afmt.Println(string(out))
}
}

@ -216,6 +218,8 @@ var MpoolStat = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -234,6 +238,7 @@ var MpoolStat = &cli.Command{
currTs := ts
for i := 0; i < cctx.Int("basefee-lookback"); i++ {
currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())

if err != nil {
return xerrors.Errorf("walking chain: %w", err)
}
@ -296,7 +301,7 @@ var MpoolStat = &cli.Command{
for a, bkt := range buckets {
act, err := api.StateGetActor(ctx, a, ts.Key())
if err != nil {
fmt.Printf("%s, err: %s\n", a, err)
afmt.Printf("%s, err: %s\n", a, err)
continue
}

@ -350,11 +355,11 @@ var MpoolStat = &cli.Command{
total.belowPast += stat.belowPast
total.gasLimit = big.Add(total.gasLimit, stat.gasLimit)

fmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
afmt.Printf("%s: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", stat.addr, stat.past, stat.cur, stat.future, stat.belowCurr, cctx.Int("basefee-lookback"), stat.belowPast, stat.gasLimit)
}

fmt.Println("-----")
fmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)
afmt.Println("-----")
afmt.Printf("total: Nonce past: %d, cur: %d, future: %d; FeeCap cur: %d, min-%d: %d, gasLimit: %s\n", total.past, total.cur, total.future, total.belowCurr, cctx.Int("basefee-lookback"), total.belowPast, total.gasLimit)

return nil
},
@ -385,8 +390,9 @@ var MpoolReplaceCmd = &cli.Command{
Usage: "Spend up to X FIL for this message in units of FIL. Previously when flag was `max-fee` units were in attoFIL. Applicable for auto mode",
},
},
ArgsUsage: "<from nonce> | <message-cid>",
ArgsUsage: "<from> <nonce> | <message-cid>",
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@ -407,13 +413,14 @@ var MpoolReplaceCmd = &cli.Command{

msg, err := api.ChainGetMessage(ctx, mcid)
if err != nil {
return fmt.Errorf("could not find referenced message: %w", err)
return xerrors.Errorf("could not find referenced message: %w", err)
}

from = msg.From
nonce = msg.Nonce
case 2:
f, err := address.NewFromString(cctx.Args().Get(0))
arg0 := cctx.Args().Get(0)
f, err := address.NewFromString(arg0)
if err != nil {
return err
}
@ -448,7 +455,7 @@ var MpoolReplaceCmd = &cli.Command{
}

if found == nil {
return fmt.Errorf("no pending message found from %s with nonce %d", from, nonce)
return xerrors.Errorf("no pending message found from %s with nonce %d", from, nonce)
}

msg := found.Message
@ -460,7 +467,7 @@ var MpoolReplaceCmd = &cli.Command{
if cctx.IsSet("fee-limit") {
maxFee, err := types.ParseFIL(cctx.String("fee-limit"))
if err != nil {
return fmt.Errorf("parsing max-spend: %w", err)
return xerrors.Errorf("parsing max-spend: %w", err)
}
mss = &lapi.MessageSendSpec{
MaxFee: abi.TokenAmount(maxFee),
@ -472,7 +479,7 @@ var MpoolReplaceCmd = &cli.Command{
msg.GasPremium = abi.NewTokenAmount(0)
retm, err := api.GasEstimateMessageGas(ctx, &msg, mss, types.EmptyTSK)
if err != nil {
return fmt.Errorf("failed to estimate gas values: %w", err)
return xerrors.Errorf("failed to estimate gas values: %w", err)
}

msg.GasPremium = big.Max(retm.GasPremium, minRBF)
@ -489,26 +496,26 @@ var MpoolReplaceCmd = &cli.Command{
}
msg.GasPremium, err = types.BigFromString(cctx.String("gas-premium"))
if err != nil {
return fmt.Errorf("parsing gas-premium: %w", err)
return xerrors.Errorf("parsing gas-premium: %w", err)
}
// TODO: estimate fee cap here
msg.GasFeeCap, err = types.BigFromString(cctx.String("gas-feecap"))
if err != nil {
return fmt.Errorf("parsing gas-feecap: %w", err)
return xerrors.Errorf("parsing gas-feecap: %w", err)
}
}

smsg, err := api.WalletSignMessage(ctx, msg.From, &msg)
if err != nil {
return fmt.Errorf("failed to sign message: %w", err)
return xerrors.Errorf("failed to sign message: %w", err)
}

cid, err := api.MpoolPush(ctx, smsg)
if err != nil {
return fmt.Errorf("failed to push new message to mempool: %w", err)
return xerrors.Errorf("failed to push new message to mempool: %w", err)
}

fmt.Println("new message cid: ", cid)
afmt.Println("new message cid: ", cid)
return nil
},
}
@ -531,6 +538,8 @@ var MpoolFindCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -548,7 +557,7 @@ var MpoolFindCmd = &cli.Command{
if cctx.IsSet("to") {
a, err := address.NewFromString(cctx.String("to"))
if err != nil {
return fmt.Errorf("'to' address was invalid: %w", err)
return xerrors.Errorf("'to' address was invalid: %w", err)
}

toFilter = a
@ -557,7 +566,7 @@ var MpoolFindCmd = &cli.Command{
if cctx.IsSet("from") {
a, err := address.NewFromString(cctx.String("from"))
if err != nil {
return fmt.Errorf("'from' address was invalid: %w", err)
return xerrors.Errorf("'from' address was invalid: %w", err)
}

fromFilter = a
@ -591,7 +600,7 @@ var MpoolFindCmd = &cli.Command{
return err
}

fmt.Println(string(b))
afmt.Println(string(b))
return nil
},
}
@ -605,6 +614,8 @@ var MpoolConfig = &cli.Command{
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}

afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -624,7 +635,7 @@ var MpoolConfig = &cli.Command{
return err
}

fmt.Println(string(bytes))
afmt.Println(string(bytes))
} else {
cfg := new(types.MpoolConfig)
bytes := []byte(cctx.Args().Get(0))
@ -651,6 +662,8 @@ var MpoolGasPerfCmd = &cli.Command{
},
},
Action: func(cctx *cli.Context) error {
afmt := NewAppFmt(cctx.App)

api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
return err
@ -717,7 +730,7 @@ var MpoolGasPerfCmd = &cli.Command{
gasReward := getGasReward(m)
gasPerf := getGasPerf(gasReward, m.Message.GasLimit)

fmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
afmt.Printf("%s\t%d\t%s\t%f\n", m.Message.From, m.Message.Nonce, gasReward, gasPerf)
}

return nil
582
cli/mpool_test.go
Normal file
@ -0,0 +1,582 @@
//stm: #cli
package cli

import (
"context"
"fmt"
"testing"

"encoding/json"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)

func TestStat(t *testing.T) {

t.Run("local", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

// mock actor to return for the sender
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr, toAddr}, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
)

//stm: @CLI_MEMPOOL_STAT_002
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1", "--local"})
assert.NoError(t, err)

assert.Contains(t, buf.String(), "Nonce past: 1")
})

t.Run("all", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolStat))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// add blocks to the chain
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
head := mock.TipSet(mock.MkBlock(first, 15, 7))

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

// mock actor to return for the sender
actor := types.Actor{Nonce: 2, Balance: big.NewInt(200000)}

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(first, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().StateGetActor(ctx, senderAddr, head.Key()).Return(&actor, nil),
)

//stm: @CLI_MEMPOOL_STAT_001
err = app.Run([]string{"mpool", "stat", "--basefee-lookback", "1"})
assert.NoError(t, err)

assert.Contains(t, buf.String(), "Nonce past: 1")
})
}
func TestPending(t *testing.T) {
t.Run("all", func(t *testing.T) {

app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_PENDING_001
err = app.Run([]string{"mpool", "pending", "--cids"})
assert.NoError(t, err)

assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("local", func(t *testing.T) {

app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_PENDING_002
err = app.Run([]string{"mpool", "pending", "--local"})
assert.NoError(t, err)

assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("to", func(t *testing.T) {

app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_PENDING_003
err = app.Run([]string{"mpool", "pending", "--to", sm.Message.To.String()})
assert.NoError(t, err)

assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("from", func(t *testing.T) {

app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolPending))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_PENDING_004
err = app.Run([]string{"mpool", "pending", "--from", sm.Message.From.String()})
assert.NoError(t, err)

assert.Contains(t, buf.String(), sm.Cid().String())
})

}
func TestReplace(t *testing.T) {
t.Run("manual", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, &sm.Message).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)

//stm: @CLI_MEMPOOL_REPLACE_002
err = app.Run([]string{"mpool", "replace", "--gas-premium", "1", "--gas-feecap", "100", sm.Cid().String()})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("auto", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

// gas fee param should be equal to the one passed in the cli invocation (used below)
maxFee := "1000000"
parsedFee, err := types.ParseFIL(maxFee)
if err != nil {
t.Fatal(err)
}
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}

gomock.InOrder(
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
// use gomock.any to match the message in expected api calls
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)

//stm: @CLI_MEMPOOL_REPLACE_002
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Cid().String()})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("sender / nonce", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolReplaceCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

// gas fee param should be equal to the one passed in the cli invocation (used below)
maxFee := "1000000"
parsedFee, err := types.ParseFIL(maxFee)
if err != nil {
t.Fatal(err)
}
mss := api.MessageSendSpec{MaxFee: abi.TokenAmount(parsedFee)}

gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
// use gomock.any to match the message in expected api calls
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
mockApi.EXPECT().WalletSignMessage(ctx, sm.Message.From, gomock.Any()).Return(sm, nil),
mockApi.EXPECT().MpoolPush(ctx, sm).Return(sm.Cid(), nil),
)

//stm: @CLI_MEMPOOL_REPLACE_001
err = app.Run([]string{"mpool", "replace", "--auto", "--fee-limit", maxFee, sm.Message.From.String(), fmt.Sprint(sm.Message.Nonce)})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
}
func TestFindMsg(t *testing.T) {
t.Run("from", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_FIND_001
err = app.Run([]string{"mpool", "find", "--from", sm.Message.From.String()})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("to", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_FIND_002
err = app.Run([]string{"mpool", "find", "--to", sm.Message.To.String()})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})

t.Run("method", func(t *testing.T) {
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolFindCmd))
defer done()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a signed message to be returned as a pending message
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
sm := mock.MkMessage(senderAddr, toAddr, 1, w)

gomock.InOrder(
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
)

//stm: @CLI_MEMPOOL_FIND_003
err = app.Run([]string{"mpool", "find", "--method", sm.Message.Method.String()})

assert.NoError(t, err)
assert.Contains(t, buf.String(), sm.Cid().String())
})
}
func TestGasPerf(t *testing.T) {
|
||||
t.Run("all", func(t *testing.T) {
|
||||
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// add blocks to the chain
|
||||
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||
|
||||
// create a signed message to be returned as a pending message
|
||||
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
|
||||
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_MEMPOOL_GAS_PERF_002
|
||||
err = app.Run([]string{"mpool", "gas-perf", "--all", "true"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, buf.String(), sm.Message.From.String())
|
||||
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
|
||||
})
|
||||
|
||||
t.Run("local", func(t *testing.T) {
|
||||
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolGasPerfCmd))
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// add blocks to the chain
|
||||
first := mock.TipSet(mock.MkBlock(nil, 5, 4))
|
||||
head := mock.TipSet(mock.MkBlock(first, 15, 7))
|
||||
|
||||
// create a signed message to be returned as a pending message
|
||||
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
toAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sm := mock.MkMessage(senderAddr, toAddr, 13, w)
|
||||
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
|
||||
mockApi.EXPECT().WalletList(ctx).Return([]address.Address{senderAddr}, nil),
|
||||
mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_MEMPOOL_GAS_PERF_001
|
||||
err = app.Run([]string{"mpool", "gas-perf"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, buf.String(), sm.Message.From.String())
|
||||
assert.Contains(t, buf.String(), fmt.Sprint(sm.Message.Nonce))
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
t.Run("get", func(t *testing.T) {
|
||||
app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: 0.25}
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_MEMPOOL_CONFIG_001
|
||||
err = app.Run([]string{"mpool", "config"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, buf.String(), mpoolCfg.PriorityAddrs[0].String())
|
||||
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitHigh))
|
||||
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.SizeLimitLow))
|
||||
assert.Contains(t, buf.String(), fmt.Sprint(mpoolCfg.ReplaceByFeeRatio))
|
||||
})
|
||||
|
||||
t.Run("set", func(t *testing.T) {
|
||||
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("mpool", MpoolConfig))
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
w, _ := wallet.NewWallet(wallet.NewMemKeyStore())
|
||||
senderAddr, err := w.WalletNew(context.Background(), types.KTSecp256k1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: 0.33}
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil),
|
||||
)
|
||||
|
||||
bytes, err := json.Marshal(mpoolCfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//stm: @CLI_MEMPOOL_CONFIG_002
|
||||
err = app.Run([]string{"mpool", "config", string(bytes)})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
108 cli/net.go
@ -38,6 +38,9 @@ var NetCmd = &cli.Command{
        NetBlockCmd,
        NetStatCmd,
        NetLimitCmd,
        NetProtectAdd,
        NetProtectRemove,
        NetProtectList,
    },
}

@ -708,3 +711,108 @@ var NetLimitCmd = &cli.Command{
        return enc.Encode(result)
    },
}

var NetProtectAdd = &cli.Command{
    Name:      "protect",
    Usage:     "Add one or more peer IDs to the list of protected peer connections",
    ArgsUsage: "<peer-id> [<peer-id>...]",
    Action: func(cctx *cli.Context) error {
        api, closer, err := GetAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := ReqContext(cctx)

        pids, err := decodePeerIDsFromArgs(cctx)
        if err != nil {
            return err
        }

        err = api.NetProtectAdd(ctx, pids)
        if err != nil {
            return err
        }

        fmt.Println("added to protected peers:")
        for _, pid := range pids {
            fmt.Printf(" %s\n", pid)
        }
        return nil
    },
}

var NetProtectRemove = &cli.Command{
    Name:      "unprotect",
    Usage:     "Remove one or more peer IDs from the list of protected peer connections.",
    ArgsUsage: "<peer-id> [<peer-id>...]",
    Action: func(cctx *cli.Context) error {
        api, closer, err := GetAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := ReqContext(cctx)

        pids, err := decodePeerIDsFromArgs(cctx)
        if err != nil {
            return err
        }

        err = api.NetProtectRemove(ctx, pids)
        if err != nil {
            return err
        }

        fmt.Println("removed from protected peers:")
        for _, pid := range pids {
            fmt.Printf(" %s\n", pid)
        }
        return nil
    },
}

// decodePeerIDsFromArgs decodes all the arguments present in cli.Context.Args as peer.ID.
//
// This function requires at least one argument to be present, and arguments must not be
// empty strings; otherwise an error is returned.
func decodePeerIDsFromArgs(cctx *cli.Context) ([]peer.ID, error) {
    pidArgs := cctx.Args().Slice()
    if len(pidArgs) == 0 {
        return nil, xerrors.Errorf("must specify at least one peer ID as an argument")
    }
    var pids []peer.ID
    for _, pidStr := range pidArgs {
        if pidStr == "" {
            return nil, xerrors.Errorf("peer ID must not be empty")
        }
        pid, err := peer.Decode(pidStr)
        if err != nil {
            return nil, err
        }
        pids = append(pids, pid)
    }
    return pids, nil
}
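
The decoding rule above is easiest to see in isolation. A minimal standalone sketch using peer.Decode directly; the import path is what lotus depended on around this time (newer libp2p releases moved it to github.com/libp2p/go-libp2p/core/peer), and the sample string is a well-known libp2p bootstrap peer ID used purely as example input:

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p-core/peer" // import path assumed from the era of this change
)

func main() {
    // Any textual form of a libp2p peer ID is accepted by peer.Decode.
    pid, err := peer.Decode("QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN")
    if err != nil {
        fmt.Println("invalid peer ID:", err)
        return
    }
    fmt.Println("decoded:", pid)
}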

var NetProtectList = &cli.Command{
    Name:  "list-protected",
    Usage: "List the peer IDs with protected connections.",
    Action: func(cctx *cli.Context) error {
        api, closer, err := GetAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := ReqContext(cctx)
        pids, err := api.NetProtectList(ctx)
        if err != nil {
            return err
        }

        for _, pid := range pids {
            fmt.Printf("%s\n", pid)
        }
        return nil
    },
}
34 cli/paych.go
@ -8,7 +8,7 @@ import (
    "sort"
    "strings"

    "github.com/filecoin-project/lotus/api"
    lapi "github.com/filecoin-project/lotus/api"

    "github.com/filecoin-project/lotus/paychmgr"

@ -39,12 +39,15 @@ var paychAddFundsCmd = &cli.Command{
    Usage:     "Add funds to the payment channel between fromAddress and toAddress. Creates the payment channel if it doesn't already exist.",
    ArgsUsage: "[fromAddress toAddress amount]",
    Flags: []cli.Flag{

        &cli.BoolFlag{
            Name:  "restart-retrievals",
            Usage: "restart stalled retrieval deals on this payment channel",
            Value: true,
        },
        &cli.BoolFlag{
            Name:  "reserve",
            Usage: "mark funds as reserved",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.Args().Len() != 3 {
@ -66,7 +69,7 @@ var paychAddFundsCmd = &cli.Command{
            return ShowHelp(cctx, fmt.Errorf("parsing amount failed: %s", err))
        }

        api, closer, err := GetFullNodeAPI(cctx)
        api, closer, err := GetFullNodeAPIV1(cctx)
        if err != nil {
            return err
        }
@ -76,7 +79,14 @@ var paychAddFundsCmd = &cli.Command{

        // Send a message to chain to create channel / add funds to existing
        // channel
        info, err := api.PaychGet(ctx, from, to, types.BigInt(amt))
        var info *lapi.ChannelInfo
        if cctx.Bool("reserve") {
            info, err = api.PaychGet(ctx, from, to, types.BigInt(amt), lapi.PaychGetOpts{
                OffChain: false,
            })
        } else {
            info, err = api.PaychFund(ctx, from, to, types.BigInt(amt))
        }
        if err != nil {
            return err
        }
@ -163,13 +173,13 @@ var paychStatusCmd = &cli.Command{
    },
}

func paychStatus(writer io.Writer, avail *api.ChannelAvailableFunds) {
func paychStatus(writer io.Writer, avail *lapi.ChannelAvailableFunds) {
    if avail.Channel == nil {
        if avail.PendingWaitSentinel != nil {
            fmt.Fprint(writer, "Creating channel\n")
            fmt.Fprintf(writer, "  From: %s\n", avail.From)
            fmt.Fprintf(writer, "  To: %s\n", avail.To)
            fmt.Fprintf(writer, "  Pending Amt: %d\n", avail.PendingAmt)
            fmt.Fprintf(writer, "  Pending Amt: %s\n", types.FIL(avail.PendingAmt))
            fmt.Fprintf(writer, "  Wait Sentinel: %s\n", avail.PendingWaitSentinel)
            return
        }
@ -189,10 +199,12 @@ func paychStatus(writer io.Writer, avail *api.ChannelAvailableFunds) {
        {"Channel", avail.Channel.String()},
        {"From", avail.From.String()},
        {"To", avail.To.String()},
        {"Confirmed Amt", fmt.Sprintf("%d", avail.ConfirmedAmt)},
        {"Pending Amt", fmt.Sprintf("%d", avail.PendingAmt)},
        {"Queued Amt", fmt.Sprintf("%d", avail.QueuedAmt)},
        {"Voucher Redeemed Amt", fmt.Sprintf("%d", avail.VoucherReedeemedAmt)},
        {"Confirmed Amt", fmt.Sprintf("%s", types.FIL(avail.ConfirmedAmt))},
        {"Available Amt", fmt.Sprintf("%s", types.FIL(avail.NonReservedAmt))},
        {"Voucher Redeemed Amt", fmt.Sprintf("%s", types.FIL(avail.VoucherReedeemedAmt))},
        {"Pending Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAmt))},
        {"Pending Available Amt", fmt.Sprintf("%s", types.FIL(avail.PendingAvailableAmt))},
        {"Queued Amt", fmt.Sprintf("%s", types.FIL(avail.QueuedAmt))},
    }
    if avail.PendingWaitSentinel != nil {
        nameValues = append(nameValues, []string{
@ -576,7 +588,7 @@ func outputVoucher(w io.Writer, v *paych.SignedVoucher, export bool) error {
        }
    }

    fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, v.Amount.String())
    fmt.Fprintf(w, "Lane %d, Nonce %d: %s", v.Lane, v.Nonce, types.FIL(v.Amount))
    if export {
        fmt.Fprintf(w, "; %s", enc)
    }
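
The hunks above repeatedly replace raw %d attoFIL output with types.FIL, which renders amounts in whole-FIL units. A minimal standalone sketch of the difference; the value and the output comments are illustrative:

package main

import (
    "fmt"

    "github.com/filecoin-project/lotus/chain/types"
)

func main() {
    amt := types.NewInt(1_500_000_000_000_000_000) // attoFIL (1 FIL = 10^18 attoFIL)
    fmt.Printf("%d\n", amt)            // raw big.Int digits: 1500000000000000000
    fmt.Printf("%s\n", types.FIL(amt)) // human-readable, something like: 1.5 FIL
}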
@ -1768,6 +1768,9 @@ var StateSectorCmd = &cli.Command{
        fmt.Println("SectorNumber: ", si.SectorNumber)
        fmt.Println("SealProof: ", si.SealProof)
        fmt.Println("SealedCID: ", si.SealedCID)
        if si.SectorKeyCID != nil {
            fmt.Println("SectorKeyCID: ", si.SectorKeyCID)
        }
        fmt.Println("DealIDs: ", si.DealIDs)
        fmt.Println()
        fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))
28 cli/sync.go
@ -33,6 +33,8 @@ var SyncStatusCmd = &cli.Command{
    Name:  "status",
    Usage: "check sync status",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        apic, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -45,9 +47,9 @@ var SyncStatusCmd = &cli.Command{
            return err
        }

        fmt.Println("sync status:")
        afmt.Println("sync status:")
        for _, ss := range state.ActiveSyncs {
            fmt.Printf("worker %d:\n", ss.WorkerID)
            afmt.Printf("worker %d:\n", ss.WorkerID)
            var base, target []cid.Cid
            var heightDiff int64
            var theight abi.ChainEpoch
@ -62,20 +64,20 @@ var SyncStatusCmd = &cli.Command{
            } else {
                heightDiff = 0
            }
            fmt.Printf("\tBase:\t%s\n", base)
            fmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
            fmt.Printf("\tHeight diff:\t%d\n", heightDiff)
            fmt.Printf("\tStage: %s\n", ss.Stage)
            fmt.Printf("\tHeight: %d\n", ss.Height)
            afmt.Printf("\tBase:\t%s\n", base)
            afmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
            afmt.Printf("\tHeight diff:\t%d\n", heightDiff)
            afmt.Printf("\tStage: %s\n", ss.Stage)
            afmt.Printf("\tHeight: %d\n", ss.Height)
            if ss.End.IsZero() {
                if !ss.Start.IsZero() {
                    fmt.Printf("\tElapsed: %s\n", time.Since(ss.Start))
                    afmt.Printf("\tElapsed: %s\n", time.Since(ss.Start))
                }
            } else {
                fmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start))
                afmt.Printf("\tElapsed: %s\n", ss.End.Sub(ss.Start))
            }
            if ss.Stage == api.StageSyncErrored {
                fmt.Printf("\tError: %s\n", ss.Message)
                afmt.Printf("\tError: %s\n", ss.Message)
            }
        }
        return nil
@ -168,6 +170,8 @@ var SyncCheckBadCmd = &cli.Command{
    Usage:     "check if the given block was marked bad, and for what reason",
    ArgsUsage: "[blockCid]",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        napi, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -190,11 +194,11 @@ var SyncCheckBadCmd = &cli.Command{
        }

        if reason == "" {
            fmt.Println("block was not marked as bad")
            afmt.Println("block was not marked as bad")
            return nil
        }

        fmt.Println(reason)
        afmt.Println(reason)
        return nil
    },
}
189 cli/sync_test.go Normal file
@ -0,0 +1,189 @@
package cli

import (
    "context"
    "fmt"
    "testing"
    "time"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/mock"
    "github.com/golang/mock/gomock"
    "github.com/stretchr/testify/assert"
)

func TestSyncStatus(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncStatusCmd))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    ts1 := mock.TipSet(mock.MkBlock(nil, 0, 0))
    ts2 := mock.TipSet(mock.MkBlock(ts1, 0, 0))

    start := time.Now()
    end := start.Add(time.Minute)

    state := &api.SyncState{
        ActiveSyncs: []api.ActiveSync{{
            WorkerID: 1,
            Base:     ts1,
            Target:   ts2,
            Stage:    api.StageMessages,
            Height:   abi.ChainEpoch(0),
            Start:    start,
            End:      end,
            Message:  "whatever",
        }},
        VMApplied: 0,
    }

    mockApi.EXPECT().SyncState(ctx).Return(state, nil)

    //stm: @CLI_SYNC_STATUS_001
    err := app.Run([]string{"sync", "status"})
    assert.NoError(t, err)

    out := buf.String()

    // the command's output is plain text, so we match on substrings
    assert.Contains(t, out, fmt.Sprintf("Base:\t[%s]", ts1.Blocks()[0].Cid().String()))
    assert.Contains(t, out, fmt.Sprintf("Target:\t[%s]", ts2.Blocks()[0].Cid().String()))
    assert.Contains(t, out, "Height diff:\t1")
    assert.Contains(t, out, "Stage: message sync")
    assert.Contains(t, out, "Height: 0")
    assert.Contains(t, out, "Elapsed: 1m0s")
}

func TestSyncMarkBad(t *testing.T) {
    app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncMarkBadCmd))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    blk := mock.MkBlock(nil, 0, 0)

    mockApi.EXPECT().SyncMarkBad(ctx, blk.Cid()).Return(nil)

    //stm: @CLI_SYNC_MARK_BAD_001
    err := app.Run([]string{"sync", "mark-bad", blk.Cid().String()})
    assert.NoError(t, err)
}

func TestSyncUnmarkBad(t *testing.T) {
    t.Run("one-block", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        blk := mock.MkBlock(nil, 0, 0)

        mockApi.EXPECT().SyncUnmarkBad(ctx, blk.Cid()).Return(nil)

        //stm: @CLI_SYNC_UNMARK_BAD_001
        err := app.Run([]string{"sync", "unmark-bad", blk.Cid().String()})
        assert.NoError(t, err)
    })

    t.Run("all", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncUnmarkBadCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        mockApi.EXPECT().SyncUnmarkAllBad(ctx).Return(nil)

        //stm: @CLI_SYNC_UNMARK_BAD_002
        err := app.Run([]string{"sync", "unmark-bad", "-all"})
        assert.NoError(t, err)
    })
}

func TestSyncCheckBad(t *testing.T) {
    t.Run("not-bad", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        blk := mock.MkBlock(nil, 0, 0)

        mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return("", nil)

        //stm: @CLI_SYNC_CHECK_BAD_002
        err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
        assert.NoError(t, err)

        assert.Contains(t, buf.String(), "block was not marked as bad")
    })

    t.Run("bad", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckBadCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        blk := mock.MkBlock(nil, 0, 0)
        reason := "whatever"

        mockApi.EXPECT().SyncCheckBad(ctx, blk.Cid()).Return(reason, nil)

        //stm: @CLI_SYNC_CHECK_BAD_001
        err := app.Run([]string{"sync", "check-bad", blk.Cid().String()})
        assert.NoError(t, err)

        assert.Contains(t, buf.String(), reason)
    })
}

func TestSyncCheckpoint(t *testing.T) {
    t.Run("tipset", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        blk := mock.MkBlock(nil, 0, 0)
        ts := mock.TipSet(blk)

        gomock.InOrder(
            mockApi.EXPECT().ChainGetBlock(ctx, blk.Cid()).Return(blk, nil),
            mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
        )

        //stm: @CLI_SYNC_CHECKPOINT_001
        err := app.Run([]string{"sync", "checkpoint", blk.Cid().String()})
        assert.NoError(t, err)
    })

    t.Run("epoch", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("sync", SyncCheckpointCmd))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        epoch := abi.ChainEpoch(0)
        blk := mock.MkBlock(nil, 0, 0)
        ts := mock.TipSet(blk)

        gomock.InOrder(
            mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(ts, nil),
            mockApi.EXPECT().SyncCheckpoint(ctx, ts.Key()).Return(nil),
        )

        //stm: @CLI_SYNC_CHECKPOINT_002
        err := app.Run([]string{"sync", "checkpoint", fmt.Sprintf("-epoch=%d", epoch)})
        assert.NoError(t, err)
    })
}
@ -223,6 +223,11 @@ func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error)
}

func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) {
    // use the mocked API in CLI unit tests, see cli/mocks_test.go for mock definition
    if mock, ok := ctx.App.Metadata["test-full-api"]; ok {
        return &v0api.WrapperV1Full{FullNode: mock.(v1api.FullNode)}, func() {}, nil
    }

    if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
        return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil
    }
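
For context on where the "test-full-api" key comes from: a sketch of the other side of this hook, i.e. a test harness planting a gomock-backed FullNode in the app metadata so GetFullNodeAPI returns it instead of dialing a real node. The harness shape and the mocks import path are assumptions; the NewMockAppWithFullAPI helper used by the new tests presumably does something along these lines:

package cli

import (
    "testing"

    "github.com/golang/mock/gomock"
    "github.com/urfave/cli/v2"

    "github.com/filecoin-project/lotus/api/mocks" // generated gomock package, path assumed
)

func newTestApp(t *testing.T) *cli.App {
    // Hypothetical harness: the mock satisfies v1api.FullNode, so the type
    // assertion in GetFullNodeAPI above succeeds.
    mockFull := mocks.NewMockFullNode(gomock.NewController(t))
    app := cli.NewApp()
    app.Metadata = map[string]interface{}{
        "test-full-api": mockFull,
    }
    return app
}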
@ -56,6 +56,8 @@ var walletNew = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        t := cctx.Args().First()
        if t == "" {
            t = "secp256k1"
@ -66,7 +68,7 @@ var walletNew = &cli.Command{
            return err
        }

        fmt.Println(nk.String())
        afmt.Println(nk.String())

        return nil
    },
@ -100,6 +102,8 @@ var walletList = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        addrs, err := api.WalletList(ctx)
        if err != nil {
            return err
@ -120,7 +124,7 @@ var walletList = &cli.Command{

        for _, addr := range addrs {
            if cctx.Bool("addr-only") {
                fmt.Println(addr.String())
                afmt.Println(addr.String())
            } else {
                a, err := api.StateGetActor(ctx, addr, types.EmptyTSK)
                if err != nil {
@ -187,6 +191,8 @@ var walletBalance = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        var addr address.Address
        if cctx.Args().First() != "" {
            addr, err = address.NewFromString(cctx.Args().First())
@ -203,9 +209,9 @@ var walletBalance = &cli.Command{
        }

        if balance.Equals(types.NewInt(0)) {
            fmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
            afmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
        } else {
            fmt.Printf("%s\n", types.FIL(balance))
            afmt.Printf("%s\n", types.FIL(balance))
        }

        return nil
@ -223,12 +229,14 @@ var walletGetDefault = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        addr, err := api.WalletDefaultAddress(ctx)
        if err != nil {
            return err
        }

        fmt.Printf("%s\n", addr.String())
        afmt.Printf("%s\n", addr.String())
        return nil
    },
}
@ -270,6 +278,8 @@ var walletExport = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        if !cctx.Args().Present() {
            return fmt.Errorf("must specify key to export")
        }
@ -289,7 +299,7 @@ var walletExport = &cli.Command{
            return err
        }

        fmt.Println(hex.EncodeToString(b))
        afmt.Println(hex.EncodeToString(b))
        return nil
    },
}
@ -403,6 +413,8 @@ var walletSign = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        if !cctx.Args().Present() || cctx.NArg() != 2 {
            return fmt.Errorf("must specify signing address and message to sign")
        }
@ -427,7 +439,7 @@ var walletSign = &cli.Command{

        sigBytes := append([]byte{byte(sig.Type)}, sig.Data...)

        fmt.Println(hex.EncodeToString(sigBytes))
        afmt.Println(hex.EncodeToString(sigBytes))
        return nil
    },
}
@ -444,6 +456,8 @@ var walletVerify = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        if !cctx.Args().Present() || cctx.NArg() != 3 {
            return fmt.Errorf("must specify signing address, message, and signature to verify")
        }
@ -476,10 +490,10 @@ var walletVerify = &cli.Command{
            return err
        }
        if ok {
            fmt.Println("valid")
            afmt.Println("valid")
            return nil
        }
        fmt.Println("invalid")
        afmt.Println("invalid")
        return NewCliError("CLI Verify called with invalid signature")
    },
}
@ -547,6 +561,8 @@ var walletMarketWithdraw = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        var wallet address.Address
        if cctx.String("wallet") != "" {
            wallet, err = address.NewFromString(cctx.String("wallet"))
@ -622,7 +638,7 @@ var walletMarketWithdraw = &cli.Command{
            return xerrors.Errorf("fund manager withdraw error: %w", err)
        }

        fmt.Printf("WithdrawBalance message cid: %s\n", smsg)
        afmt.Printf("WithdrawBalance message cid: %s\n", smsg)

        // wait for it to get mined into a block
        wait, err := api.StateWaitMsg(ctx, smsg, uint64(cctx.Int("confidence")))
@ -632,7 +648,7 @@ var walletMarketWithdraw = &cli.Command{

        // check it executed successfully
        if wait.Receipt.ExitCode != 0 {
            fmt.Println(cctx.App.Writer, "withdrawal failed!")
            afmt.Println("withdrawal failed!")
            return err
        }

@ -647,7 +663,7 @@ var walletMarketWithdraw = &cli.Command{
            return err
        }

        fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn))
        afmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn))
        if withdrawn.LessThan(amt) {
            fmt.Printf("Note that this is less than the requested amount of %s \n", types.FIL(amt))
        }
@ -681,6 +697,8 @@ var walletMarketAdd = &cli.Command{
        defer closer()
        ctx := ReqContext(cctx)

        afmt := NewAppFmt(cctx.App)

        // Get amount param
        if !cctx.Args().Present() {
            return fmt.Errorf("must pass amount to add")
@ -722,7 +740,7 @@ var walletMarketAdd = &cli.Command{
            return xerrors.Errorf("add balance error: %w", err)
        }

        fmt.Printf("AddBalance message cid: %s\n", smsg)
        afmt.Printf("AddBalance message cid: %s\n", smsg)

        return nil
    },
333 cli/wallet_test.go Normal file
@ -0,0 +1,333 @@
//stm: #cli
package cli

import (
    "context"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "testing"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/lotus/api"
    apitypes "github.com/filecoin-project/lotus/api/types"
    types "github.com/filecoin-project/lotus/chain/types"
    "github.com/golang/mock/gomock"
    "github.com/ipfs/go-cid"
    "github.com/multiformats/go-multihash"
    "github.com/stretchr/testify/assert"
)

func TestWalletNew(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletNew))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    keyType := types.KeyType("secp256k1")
    address, err := address.NewFromString("t0123")
    assert.NoError(t, err)

    mockApi.EXPECT().WalletNew(ctx, keyType).Return(address, nil)

    //stm: @CLI_WALLET_NEW_001
    err = app.Run([]string{"wallet", "new"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), address.String())
}

func TestWalletList(t *testing.T) {

    addr, err := address.NewIDAddress(1234)
    addresses := []address.Address{addr}
    assert.NoError(t, err)

    cid := cid.Cid{}
    key := types.NewTipSetKey(cid)

    actor := types.Actor{
        Code:    cid,
        Head:    cid,
        Nonce:   0,
        Balance: big.NewInt(100),
    }

    t.Run("wallet-list-addr-only", func(t *testing.T) {

        app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
            mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
        )

        //stm: @CLI_WALLET_LIST_001
        err := app.Run([]string{"wallet", "list", "--addr-only"})
        assert.NoError(t, err)
        assert.Contains(t, buf.String(), addr.String())
    })
    t.Run("wallet-list-id", func(t *testing.T) {

        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
            mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
            mockApi.EXPECT().StateGetActor(ctx, addr, key).Return(&actor, nil),
            mockApi.EXPECT().StateLookupID(ctx, addr, key).Return(addr, nil),
        )

        //stm: @CLI_WALLET_LIST_002
        err := app.Run([]string{"wallet", "list", "--id"})
        assert.NoError(t, err)
    })
    t.Run("wallet-list-market", func(t *testing.T) {

        app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletList))
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        balance := api.MarketBalance{
            Escrow: big.NewInt(1234),
            Locked: big.NewInt(123),
        }

        gomock.InOrder(
            mockApi.EXPECT().WalletList(ctx).Return(addresses, nil),
            mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil),
            mockApi.EXPECT().StateGetActor(ctx, addr, key).Return(&actor, nil),
            mockApi.EXPECT().StateMarketBalance(ctx, addr, key).Return(balance, nil),
        )

        //stm: @CLI_WALLET_LIST_003
        err := app.Run([]string{"wallet", "list", "--market"})
        assert.NoError(t, err)
    })
}

func TestWalletBalance(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletBalance))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    balance := big.NewInt(1234)

    mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)

    //stm: @CLI_WALLET_BALANCE_001
    err = app.Run([]string{"wallet", "balance", "f01234"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), balance.String())
}

func TestWalletGetDefault(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletGetDefault))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewFromString("t0123")
    assert.NoError(t, err)

    mockApi.EXPECT().WalletDefaultAddress(ctx).Return(addr, nil)

    //stm: @CLI_WALLET_GET_DEFAULT_001
    err = app.Run([]string{"wallet", "default"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), addr.String())
}

func TestWalletSetDefault(t *testing.T) {
    app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletSetDefault))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    mockApi.EXPECT().WalletSetDefault(ctx, addr).Return(nil)

    //stm: @CLI_WALLET_SET_DEFAULT_001
    err = app.Run([]string{"wallet", "set-default", "f01234"})
    assert.NoError(t, err)
}

func TestWalletExport(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletExport))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    keyInfo := types.KeyInfo{
        Type:       types.KTSecp256k1,
        PrivateKey: []byte("0x000000000000000000001"),
    }

    mockApi.EXPECT().WalletExport(ctx, addr).Return(&keyInfo, nil)

    ki, err := json.Marshal(keyInfo)
    assert.NoError(t, err)

    //stm: @CLI_WALLET_EXPORT_001
    err = app.Run([]string{"wallet", "export", "f01234"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), hex.EncodeToString(ki))
}
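
The assertion above leans on the export format: the CLI prints the KeyInfo as hex-encoded JSON, so the test hex-encodes the same bytes. A standalone sketch of that encoding round trip (the private key bytes are dummy data):

package main

import (
    "encoding/hex"
    "encoding/json"
    "fmt"

    "github.com/filecoin-project/lotus/chain/types"
)

func main() {
    ki := types.KeyInfo{
        Type:       types.KTSecp256k1,
        PrivateKey: []byte("not a real key"), // illustrative only
    }
    b, err := json.Marshal(ki)
    if err != nil {
        panic(err)
    }
    // This hex string is what the wallet export command prints and what the
    // test above matches against.
    fmt.Println(hex.EncodeToString(b))
}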

func TestWalletSign(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletSign))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewFromString("f01234")
    assert.NoError(t, err)

    msg, err := hex.DecodeString("01")
    assert.NoError(t, err)

    signature := crypto.Signature{
        Type: crypto.SigTypeSecp256k1,
        Data: []byte{0x01},
    }

    mockApi.EXPECT().WalletSign(ctx, addr, msg).Return(&signature, nil)

    sigBytes := append([]byte{byte(signature.Type)}, signature.Data...)

    //stm: @CLI_WALLET_SIGN_001
    err = app.Run([]string{"wallet", "sign", "f01234", "01"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), hex.EncodeToString(sigBytes))
}

func TestWalletVerify(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletVerify))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    msg := []byte{1}
    signature := crypto.Signature{
        Type: crypto.SigTypeSecp256k1,
        Data: []byte{},
    }

    mockApi.EXPECT().WalletVerify(ctx, addr, msg, &signature).Return(true, nil)

    //stm: @CLI_WALLET_VERIFY_001
    err = app.Run([]string{"wallet", "verify", "f01234", "01", "01"})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), "valid")
}

func TestWalletDelete(t *testing.T) {
    app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletDelete))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    mockApi.EXPECT().WalletDelete(ctx, addr).Return(nil)

    //stm: @CLI_WALLET_DELETE_001
    err = app.Run([]string{"wallet", "delete", "f01234"})
    assert.NoError(t, err)
}

func TestWalletMarketWithdraw(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletMarket))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    addr, err := address.NewIDAddress(1234)
    assert.NoError(t, err)

    balance := api.MarketBalance{
        Escrow: big.NewInt(100),
        Locked: big.NewInt(10),
    }

    h, err := hex.DecodeString("12209cbc07c3f991725836a3aa2a581ca2029198aa420b9d99bc0e131d9f3e2cbe47")
    assert.NoError(t, err)
    cid := cid.NewCidV0(multihash.Multihash(h))
    msgLookup := api.MsgLookup{}

    var networkVers apitypes.NetworkVersion

    gomock.InOrder(
        mockApi.EXPECT().StateMarketBalance(ctx, addr, types.TipSetKey{}).Return(balance, nil),
        // the reserved amount is mocked to 10
        mockApi.EXPECT().MarketGetReserved(ctx, addr).Return(big.NewInt(10), nil),
        // available = escrow (100) - locked (10) - reserved (10) = 80
        mockApi.EXPECT().MarketWithdraw(ctx, addr, addr, big.NewInt(80)).Return(cid, nil),
        mockApi.EXPECT().StateWaitMsg(ctx, cid, uint64(5), abi.ChainEpoch(int64(-1)), true).Return(&msgLookup, nil),
        mockApi.EXPECT().StateNetworkVersion(ctx, types.TipSetKey{}).Return(networkVers, nil),
    )

    //stm: @CLI_WALLET_MARKET_WITHDRAW_001
    err = app.Run([]string{"wallet", "market", "withdraw", "--wallet", addr.String()})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), fmt.Sprintf("WithdrawBalance message cid: %s", cid))
}

func TestWalletMarketAdd(t *testing.T) {
    app, mockApi, buffer, done := NewMockAppWithFullAPI(t, WithCategory("wallet", walletMarket))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    toAddr := address.Address{}
    defaultAddr := address.Address{}

    h, err := hex.DecodeString("12209cbc07c3f991725836a3aa2a581ca2029198aa420b9d99bc0e131d9f3e2cbe47")
    assert.NoError(t, err)
    cid := cid.NewCidV0(multihash.Multihash(h))

    gomock.InOrder(
        mockApi.EXPECT().WalletDefaultAddress(ctx).Return(defaultAddr, nil),
        mockApi.EXPECT().MarketAddBalance(ctx, defaultAddr, toAddr, big.NewInt(80)).Return(cid, nil),
    )

    //stm: @CLI_WALLET_MARKET_ADD_001
    err = app.Run([]string{"wallet", "market", "add", "0.000000000000000080", "--address", toAddr.String()})
    assert.NoError(t, err)
    assert.Contains(t, buffer.String(), fmt.Sprintf("AddBalance message cid: %s", cid))
}
@ -276,6 +276,13 @@ var sealBenchCmd = &cli.Command{
            if err != nil {
                return xerrors.Errorf("failed to run seals: %w", err)
            }
            for _, s := range extendedSealedSectors {
                sealedSectors = append(sealedSectors, proof.SectorInfo{
                    SealedCID:    s.SealedCID,
                    SectorNumber: s.SectorNumber,
                    SealProof:    s.SealProof,
                })
            }
        } else {
            // TODO: implement sbfs.List() and use that for all cases (preexisting sectorbuilder or not)

@ -3,8 +3,10 @@ package main
import (
    "fmt"
    "os"
    "strings"

    "github.com/fatih/color"
    "github.com/ipfs/go-cid"
    "github.com/urfave/cli/v2"

    "github.com/filecoin-project/lotus/api"
@ -21,6 +23,7 @@ var dagstoreCmd = &cli.Command{
        dagstoreRecoverShardCmd,
        dagstoreInitializeAllCmd,
        dagstoreGcCmd,
        dagstoreLookupPiecesCmd,
    },
}

@ -52,38 +55,7 @@ var dagstoreListShardsCmd = &cli.Command{
            return err
        }

        if len(shards) == 0 {
            return nil
        }

        tw := tablewriter.New(
            tablewriter.Col("Key"),
            tablewriter.Col("State"),
            tablewriter.Col("Error"),
        )

        colors := map[string]color.Attribute{
            "ShardStateAvailable": color.FgGreen,
            "ShardStateServing":   color.FgBlue,
            "ShardStateErrored":   color.FgRed,
            "ShardStateNew":       color.FgYellow,
        }

        for _, s := range shards {
            m := map[string]interface{}{
                "Key": s.Key,
                "State": func() string {
                    if c, ok := colors[s.State]; ok {
                        return color.New(c).Sprint(s.State)
                    }
                    return s.State
                }(),
                "Error": s.Error,
            }
            tw.Write(m)
        }

        return tw.Flush(os.Stdout)
        return printTableShards(shards)
    },
}

@ -265,3 +237,81 @@ var dagstoreGcCmd = &cli.Command{
        return nil
    },
}

func printTableShards(shards []api.DagstoreShardInfo) error {
    if len(shards) == 0 {
        return nil
    }

    tw := tablewriter.New(
        tablewriter.Col("Key"),
        tablewriter.Col("State"),
        tablewriter.Col("Error"),
    )

    colors := map[string]color.Attribute{
        "ShardStateAvailable": color.FgGreen,
        "ShardStateServing":   color.FgBlue,
        "ShardStateErrored":   color.FgRed,
        "ShardStateNew":       color.FgYellow,
    }

    for _, s := range shards {
        m := map[string]interface{}{
            "Key": s.Key,
            "State": func() string {
                trimmedState := strings.TrimPrefix(s.State, "ShardState")
                if c, ok := colors[s.State]; ok {
                    return color.New(c).Sprint(trimmedState)
                }
                return trimmedState
            }(),
            "Error": s.Error,
        }
        tw.Write(m)
    }
    return tw.Flush(os.Stdout)
}

var dagstoreLookupPiecesCmd = &cli.Command{
    Name:      "lookup-pieces",
    Usage:     "Lookup pieces that a given CID belongs to",
    ArgsUsage: "<cid>",
    Flags: []cli.Flag{
        &cli.BoolFlag{
            Name:        "color",
            Usage:       "use color in display output",
            DefaultText: "depends on output being a TTY",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.IsSet("color") {
            color.NoColor = !cctx.Bool("color")
        }

        if cctx.NArg() != 1 {
            return fmt.Errorf("must provide a CID")
        }

        cidStr := cctx.Args().First()
        cid, err := cid.Parse(cidStr)
        if err != nil {
            return fmt.Errorf("invalid CID: %w", err)
        }

        marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        shards, err := marketsApi.DagstoreLookupPieces(ctx, cid)
        if err != nil {
            return err
        }

        return printTableShards(shards)
    },
}
86 cmd/lotus-miner/index_provider.go Normal file
@ -0,0 +1,86 @@
package main

import (
    "fmt"

    "github.com/ipfs/go-cid"

    "github.com/fatih/color"
    "github.com/urfave/cli/v2"

    lcli "github.com/filecoin-project/lotus/cli"
)

var indexProvCmd = &cli.Command{
    Name:  "index",
    Usage: "Manage the index provider on the markets subsystem",
    Subcommands: []*cli.Command{
        indexProvAnnounceCmd,
        indexProvAnnounceAllCmd,
    },
}

var indexProvAnnounceCmd = &cli.Command{
    Name:      "announce",
    ArgsUsage: "<deal proposal cid>",
    Usage:     "Announce a deal to indexers so they can download its index",
    Flags: []cli.Flag{
        &cli.BoolFlag{
            Name:        "color",
            Usage:       "use color in display output",
            DefaultText: "depends on output being a TTY",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.IsSet("color") {
            color.NoColor = !cctx.Bool("color")
        }

        if cctx.NArg() != 1 {
            return fmt.Errorf("must provide the deal proposal CID")
        }

        proposalCidStr := cctx.Args().First()
        proposalCid, err := cid.Parse(proposalCidStr)
        if err != nil {
            return fmt.Errorf("invalid deal proposal CID: %w", err)
        }

        marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        return marketsApi.IndexerAnnounceDeal(ctx, proposalCid)
    },
}

var indexProvAnnounceAllCmd = &cli.Command{
    Name:  "announce-all",
    Usage: "Announce all active deals to indexers so they can download the indices",
    Flags: []cli.Flag{
        &cli.BoolFlag{
            Name:        "color",
            Usage:       "use color in display output",
            DefaultText: "depends on output being a TTY",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.IsSet("color") {
            color.NoColor = !cctx.Bool("color")
        }

        marketsApi, closer, err := lcli.GetMarketsAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()

        ctx := lcli.ReqContext(cctx)

        return marketsApi.IndexerAnnounceAllDeals(ctx)
    },
}
@ -126,7 +126,7 @@ func infoCmdAct(cctx *cli.Context) error {

    alerts, err := minerApi.LogAlerts(ctx)
    if err != nil {
        return xerrors.Errorf("getting alerts: %w", err)
        fmt.Printf("ERROR: getting alerts: %s\n", err)
    }

    activeAlerts := make([]alerting.Alert, 0)
@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
var stateList = []stateMeta{
    {col: 39, state: "Total"},
    {col: color.FgGreen, state: sealing.Proving},
    {col: color.FgGreen, state: sealing.UpdateActivating},

    {col: color.FgBlue, state: sealing.Empty},
    {col: color.FgBlue, state: sealing.WaitDeals},
@ -496,6 +497,7 @@ var stateList = []stateMeta{
    {col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
    {col: color.FgYellow, state: sealing.ReplicaUpdateWait},
    {col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
    {col: color.FgYellow, state: sealing.ReleaseSectorKey},

    {col: color.FgCyan, state: sealing.Terminating},
    {col: color.FgCyan, state: sealing.TerminateWait},
@ -524,6 +526,8 @@ var stateList = []stateMeta{
    {col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
    {col: color.FgRed, state: sealing.SnapDealsDealsExpired},
    {col: color.FgRed, state: sealing.ReplicaUpdateFailed},
    {col: color.FgRed, state: sealing.ReleaseSectorKeyFailed},
    {col: color.FgRed, state: sealing.FinalizeReplicaUpdateFailed},
}

func init() {
@ -96,6 +96,11 @@ var infoAllCmd = &cli.Command{
            fmt.Println("ERROR: ", err)
        }

        fmt.Println("\n#: Storage Locks")
        if err := storageLocks.Action(cctx); err != nil {
            fmt.Println("ERROR: ", err)
        }

        fmt.Println("\n#: Sched Diag")
        if err := sealingSchedDiagCmd.Action(cctx); err != nil {
            fmt.Println("ERROR: ", err)
@ -192,6 +197,11 @@ var infoAllCmd = &cli.Command{
            fmt.Println("ERROR: ", err)
        }

        fmt.Println("\n#: Storage Sector List")
        if err := storageListSectorsCmd.Action(cctx); err != nil {
            fmt.Println("ERROR: ", err)
        }

        fmt.Println("\n#: Expired Sectors")
        if err := sectorsExpiredCmd.Action(cctx); err != nil {
            fmt.Println("ERROR: ", err)
@ -468,12 +468,15 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
        stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{})

        smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{
            ParallelFetchLimit: 10,
            AllowAddPiece:      true,
            AllowPreCommit1:    true,
            AllowPreCommit2:    true,
            AllowCommit:        true,
            AllowUnseal:        true,
            ParallelFetchLimit:       10,
            AllowAddPiece:            true,
            AllowPreCommit1:          true,
            AllowPreCommit2:          true,
            AllowCommit:              true,
            AllowUnseal:              true,
            AllowReplicaUpdate:       true,
            AllowProveReplicaUpdate2: true,
            AllowRegenSectorKey:      true,
        }, wsts, smsts)
        if err != nil {
            return err
@ -48,6 +48,7 @@ func main() {
        lcli.WithCategory("market", retrievalDealsCmd),
        lcli.WithCategory("market", dataTransfersCmd),
        lcli.WithCategory("market", dagstoreCmd),
        lcli.WithCategory("market", indexProvCmd),
        lcli.WithCategory("storage", sectorsCmd),
        lcli.WithCategory("storage", provingCmd),
        lcli.WithCategory("storage", storageCmd),
@ -437,6 +437,7 @@ var provingCheckProvableCmd = &cli.Command{
        }

        var tocheck []storage.SectorRef
        var update []bool
        for _, info := range sectorInfos {
            si := abi.SectorID{
                Miner: abi.ActorID(mid),
@ -454,9 +455,10 @@ var provingCheckProvableCmd = &cli.Command{
                ProofType: info.SealProof,
                ID:        si,
            })
            update = append(update, info.SectorKeyCID != nil)
        }

        bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
        bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow"))
        if err != nil {
            return err
        }
@ -39,9 +39,12 @@ func barString(total, y, g float64) string {
    yBars := int(math.Round(y / total * barCols))
    gBars := int(math.Round(g / total * barCols))
    eBars := int(barCols) - yBars - gBars
    return color.YellowString(strings.Repeat("|", yBars)) +
        color.GreenString(strings.Repeat("|", gBars)) +
        strings.Repeat(" ", eBars)
    var barString = color.YellowString(strings.Repeat("|", yBars)) +
        color.GreenString(strings.Repeat("|", gBars))
    if eBars >= 0 {
        barString += strings.Repeat(" ", eBars)
    }
    return barString
}
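
The new eBars guard exists because math.Round can push both coloured segments up at once, so their sum may exceed the bar width, and the old strings.Repeat(" ", eBars) would panic on a negative count. A standalone sketch with illustrative numbers (barCols is assumed to be 64 here; in the real code it is a package-level constant):

package main

import (
    "fmt"
    "math"
)

func main() {
    const barCols = 64.0
    total, y, g := 10.0, 5.6, 5.6
    yBars := int(math.Round(y / total * barCols)) // 36
    gBars := int(math.Round(g / total * barCols)) // 36
    // 64 - 36 - 36 = -8: without the guard this count would panic strings.Repeat.
    fmt.Println(int(barCols) - yBars - gBars)
}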

var sealingWorkersCmd = &cli.Command{
@ -55,6 +55,7 @@ var sectorsCmd = &cli.Command{
        sectorsTerminateCmd,
        sectorsRemoveCmd,
        sectorsSnapUpCmd,
        sectorsSnapAbortCmd,
        sectorsMarkForUpgradeCmd,
        sectorsStartSealCmd,
        sectorsSealDelayCmd,
@ -160,7 +161,7 @@ var sectorsStatusCmd = &cli.Command{
        fmt.Printf("Expiration:\t\t%v\n", status.Expiration)
        fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight)
        fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight)
        fmt.Printf("InitialPledge:\t\t%v\n", status.InitialPledge)
        fmt.Printf("InitialPledge:\t\t%v\n", types.FIL(status.InitialPledge))
        fmt.Printf("\nExpiration Info\n")
        fmt.Printf("OnTime:\t\t%v\n", status.OnTime)
        fmt.Printf("Early:\t\t%v\n", status.Early)
@ -293,8 +294,14 @@ var sectorsListCmd = &cli.Command{
            Aliases: []string{"e"},
        },
        &cli.BoolFlag{
            Name:  "seal-time",
            Usage: "display how long it took for the sector to be sealed",
            Name:    "initial-pledge",
            Usage:   "display initial pledge",
            Aliases: []string{"p"},
        },
        &cli.BoolFlag{
            Name:    "seal-time",
            Usage:   "display how long it took for the sector to be sealed",
            Aliases: []string{"t"},
        },
        &cli.StringFlag{
            Name: "states",
@ -404,6 +411,7 @@ var sectorsListCmd = &cli.Command{
            tablewriter.Col("Deals"),
            tablewriter.Col("DealWeight"),
            tablewriter.Col("VerifiedPower"),
            tablewriter.Col("Pledge"),
            tablewriter.NewLineCol("Error"),
            tablewriter.NewLineCol("RecoveryTimeout"))

@ -482,6 +490,9 @@ var sectorsListCmd = &cli.Command{
                        m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
                    }
                }
                if inSSet && cctx.Bool("initial-pledge") {
                    m["Pledge"] = types.FIL(st.InitialPledge).Short()
                }
            }

            if !fast && deals > 0 {
@ -1520,6 +1531,43 @@ var sectorsSnapUpCmd = &cli.Command{
    },
}

var sectorsSnapAbortCmd = &cli.Command{
    Name:      "abort-upgrade",
    Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to its pre-upgrade state",
    ArgsUsage: "<sectorNum>",
    Flags: []cli.Flag{
        &cli.BoolFlag{
            Name:  "really-do-it",
            Usage: "pass this flag if you know what you are doing",
        },
    },
    Action: func(cctx *cli.Context) error {
        if cctx.Args().Len() != 1 {
            return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
        }

        really := cctx.Bool("really-do-it")
        if !really {
            //nolint:golint
            return fmt.Errorf("--really-do-it must be specified for this action to have an effect; you have been warned")
        }

        nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := lcli.ReqContext(cctx)

        id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
        if err != nil {
            return xerrors.Errorf("could not parse sector number: %w", err)
        }

        return nodeApi.SectorAbortUpgrade(ctx, abi.SectorNumber(id))
    },
}

var sectorsMarkForUpgradeCmd = &cli.Command{
    Name:  "mark-for-upgrade",
    Usage: "Mark a committed capacity sector for replacement by a sector with deals",
Some files were not shown because too many files have changed in this diff.