Merge branch 'master' into bloxico/basic_wallet_tests
merge master to update the branch
commit 5ece3d4c10
@@ -44,13 +44,13 @@ commands:
       - restore_cache:
           name: Restore parameters cache
           keys:
-            - 'v25-2k-lotus-params'
+            - 'v26-2k-lotus-params'
           paths:
             - /var/tmp/filecoin-proof-parameters/
       - run: ./lotus fetch-params 2048
       - save_cache:
           name: Save parameters cache
-          key: 'v25-2k-lotus-params'
+          key: 'v26-2k-lotus-params'
           paths:
             - /var/tmp/filecoin-proof-parameters/
   install_ipfs:
@@ -850,6 +850,11 @@ workflows:
       suite: itest-get_messages_in_ts
       target: "./itests/get_messages_in_ts_test.go"

+  - test:
+      name: test-itest-mempool
+      suite: itest-mempool
+      target: "./itests/mempool_test.go"
+
   - test:
       name: test-itest-multisig
       suite: itest-multisig
.github/pull_request_template.md (vendored): 4 changes
@@ -14,8 +14,8 @@ Before you mark the PR ready for review, please make sure that:
 - [ ] All commits have a clear commit message.
 - [ ] The PR title is in the form of `<PR type>: <area>: <change being made>`
   - example: ` fix: mempool: Introduce a cache for valid signatures`
-  - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _misc_, _perf_, _refactor_, _revert_, _style_, _test_
-  - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_
+  - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _perf_, _refactor_, _revert_, _style_, _test_
+  - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_, _deps_
 - [ ] This PR has tests for new functionality or change in behaviour
 - [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials).
 - [ ] CI is green
CHANGELOG.md: 72 changes
@@ -1,5 +1,77 @@
 # Lotus changelog

+# 1.14.1 / 2022-02-18
+
+This is an **optional** release of Lotus that fixes the incorrect *comment* about the network v15 OhSnap upgrade **date**. Note the actual upgrade epoch in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct.
+
+# 1.14.0 / 2022-02-17
+
+This is a MANDATORY release of Lotus that introduces [Filecoin network v15, codenamed the OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).
+
+The network is scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to this release (or a later release) before that time. Storage providers must update their daemons, miners, and worker(s).
+
+The OhSnap upgrade introduces the following FIPs, delivered in [actors v7](https://github.com/filecoin-project/specs-actors/releases/tag/v7.0.0):
+- [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md)
+- [FIP-0028 Remove Datacap from Verified clients](https://github.com/filecoin-project/FIPs/pull/226)
+
+It is recommended that storage providers download the new params before updating their node, miner, and workers. To do so:
+
+- Download Lotus v1.14.0 or later
+- run `make lotus-shed`
+- run `./lotus-shed fetch-params` with the appropriate `proving-params` flag
+- Upgrade the Lotus daemon and miner **when the previous step is complete**
+
+All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (150 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
+
+## New Features and Changes
+- Integrate actor v7-rc1:
+  - Integrate v7 actors ([#7617](https://github.com/filecoin-project/lotus/pull/7617))
+  - feat: state: Fast migration for v15 ([#7933](https://github.com/filecoin-project/lotus/pull/7933))
+  - fix: blockstore: Add missing locks to autobatch::Get() ([#7939](https://github.com/filecoin-project/lotus/pull/7939))
+  - correctness fixes for the autobatch blockstore ([#7940](https://github.com/filecoin-project/lotus/pull/7940))
+- Implement and support [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md)
+  - chore: deps: Integrate proof v11.0.0 ([#7923](https://github.com/filecoin-project/lotus/pull/7923))
+  - Snap Deals Lotus Integration: FSM Posting and integration test ([#7810](https://github.com/filecoin-project/lotus/pull/7810))
+  - Feat/sector storage unseal ([#7730](https://github.com/filecoin-project/lotus/pull/7730))
+  - Feat/snap deals storage ([#7615](https://github.com/filecoin-project/lotus/pull/7615))
+  - fix: sealing: Add more deal expiration checks during PRU pipeline ([#7871](https://github.com/filecoin-project/lotus/pull/7871))
+  - chore: deps: Update go-paramfetch ([#7917](https://github.com/filecoin-project/lotus/pull/7917))
+  - feat: #7880 gas: add gas charge for VerifyReplicaUpdate ([#7897](https://github.com/filecoin-project/lotus/pull/7897))
+  - enhancement: sectors: disable existing cc upgrade path 2 days before the upgrade epoch ([#7900](https://github.com/filecoin-project/lotus/pull/7900))
+
+## Improvements
+- updating to new datastore/blockstore code with contexts ([#7646](https://github.com/filecoin-project/lotus/pull/7646))
+- reorder transfer checks so as to ensure sending 2B FIL to yourself fails if you don't have that amount ([#7637](https://github.com/filecoin-project/lotus/pull/7637))
+- VM: Circ supply should be constant per epoch ([#7811](https://github.com/filecoin-project/lotus/pull/7811))
+
+## Bug Fixes
+- Fix: state: circsuypply calc around null blocks ([#7890](https://github.com/filecoin-project/lotus/pull/7890))
+- Mempool msg selection should respect block message limits ([#7321](https://github.com/filecoin-project/lotus/pull/7321))
+- SplitStore: supress compaction near upgrades ([#7734](https://github.com/filecoin-project/lotus/pull/7734))
+
+## Others
+- chore: create pull_request_template.md ([#7726](https://github.com/filecoin-project/lotus/pull/7726))
+
+## Contributors
+
+| Contributor | Commits | Lines ± | Files Changed |
+|-------------|---------|---------|---------------|
+| Aayush Rajasekaran | 41 | +5538/-1205 | 189 |
+| zenground0 | 11 | +3316/-524 | 124 |
+| Jennifer Wang | 29 | +714/-599 | 68 |
+| ZenGround0 | 3 | +263/-25 | 11 |
+| c r | 2 | +198/-30 | 6 |
+| vyzo | 4 | +189/-7 | 7 |
+| Aayush | 11 | +146/-48 | 49 |
+| web3-bot | 10 | +99/-17 | 10 |
+| Steven Allen | 1 | +55/-37 | 1 |
+| Jiaying Wang | 5 | +30/-8 | 5 |
+| Jakub Sztandera | 2 | +8/-3 | 3 |
+| Łukasz Magiera | 1 | +3/-3 | 2 |
+| Travis Person | 1 | +2/-2 | 2 |
+| Rod Vagg | 1 | +2/-2 | 2 |
+
 # v1.13.2 / 2022-01-09

 Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like
Makefile: 2 changes
@@ -345,6 +345,8 @@ gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci
	@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
.PHONY: gen

+jen: gen
+
snap: lotus lotus-miner lotus-worker
	snapcraft
	# snapcraft upload ./lotus_*.snap
@@ -1084,7 +1084,7 @@ type CirculatingSupply struct {
type MiningBaseInfo struct {
	MinerPower      types.BigInt
	NetworkPower    types.BigInt
-	Sectors         []builtin.SectorInfo
+	Sectors         []builtin.ExtendedSectorInfo
	WorkerKey       address.Address
	SectorSize      abi.SectorSize
	PrevBeaconEntry types.BeaconEntry
@@ -45,8 +45,9 @@ type Gateway interface {
	GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
	MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
	MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
-	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
+	MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
+	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
	MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
	StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
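For illustration, a hedged Go sketch (not part of this diff) of how a client might call the newly exposed `MsigGetPending`. The `gw` handle and multisig address are assumed to come from an existing RPC client; `types.EmptyTSK` selects the current head tipset.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	lotusapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// listPending prints the multisig's pending transactions at the head tipset.
// This is a sketch under the assumption that `gw` is an already-connected
// gateway client; error handling is minimal on purpose.
func listPending(ctx context.Context, gw lotusapi.Gateway, msig address.Address) error {
	pending, err := gw.MsigGetPending(ctx, msig, types.EmptyTSK)
	if err != nil {
		return err
	}
	for _, txn := range pending {
		fmt.Printf("txn %d: to %s, value %s, %d approvals\n",
			txn.ID, txn.To, txn.Value, len(txn.Approved))
	}
	return nil
}
```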
@@ -51,6 +51,11 @@ type Net interface {
	NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
	NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read

+	// ResourceManager API
+	NetStat(ctx context.Context, scope string) (NetStat, error) //perm:read
+	NetLimit(ctx context.Context, scope string) (NetLimit, error) //perm:read
+	NetSetLimit(ctx context.Context, scope string, limit NetLimit) error //perm:admin
+
	// ID returns peerID of libp2p node backing this API
	ID(context.Context) (peer.ID, error) //perm:read
}
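A hedged usage sketch for the new ResourceManager endpoints (not part of this diff): it assumes an already-constructed client implementing `api.Net`, and the scope names used by go-libp2p's resource manager ("system", "transient", "svc:…", "proto:…", "peer:…").

```go
package main

import (
	"context"
	"fmt"
	"log"

	lotusapi "github.com/filecoin-project/lotus/api"
)

// printSystemScope reads the "system" scope's live resource usage.
// `node` is assumed to be a connected RPC client; NetStat is perm:read.
func printSystemScope(ctx context.Context, node lotusapi.Net) {
	stat, err := node.NetStat(ctx, "system")
	if err != nil {
		log.Fatal(err)
	}
	if stat.System != nil {
		fmt.Printf("system scope: %d bytes reserved, conns %d in / %d out, %d FDs\n",
			stat.System.Memory, stat.System.NumConnsInbound,
			stat.System.NumConnsOutbound, stat.System.NumFD)
	}
}
```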
@@ -14,6 +14,7 @@ import (
	"github.com/filecoin-project/go-address"
	datatransfer "github.com/filecoin-project/go-data-transfer"
	"github.com/filecoin-project/go-state-types/abi"
+	abinetwork "github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
	"github.com/filecoin-project/specs-storage/storage"

@@ -99,8 +100,8 @@ type StorageMiner interface {
	// Returns null if message wasn't sent
	SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
	// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
-	SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
-	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
+	SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
	// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
	// Returns null if message wasn't sent
	SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
@@ -111,6 +112,9 @@ type StorageMiner interface {
	SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+	SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
+	// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it
+	SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin

	// WorkerConnect tells the node to connect to workers RPC
	WorkerConnect(context.Context, string) error //perm:admin retry:true
@@ -128,6 +132,7 @@ type StorageMiner interface {
	ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
	ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
	ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
	ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
	ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
	ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
@@ -253,7 +258,7 @@ type StorageMiner interface {

	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin

-	ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
+	ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
}

var _ storiface.WorkerReturn = *new(StorageMiner)
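A hedged sketch of adapting a caller to the widened `ComputeProof` signature (names here are illustrative, not from this diff): callers must now pass the PoSt epoch and network version alongside the extended sector infos, which carry the sector key needed for snapped sectors.

```go
package main

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	abinetwork "github.com/filecoin-project/go-state-types/network"

	lotusapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
)

// computeProofAt wraps the new call shape: the epoch and network version pin
// which proof type is expected, so behaviour can differ across upgrades
// (e.g. OhSnap). `m` is assumed to be a connected StorageMiner client.
func computeProofAt(ctx context.Context, m lotusapi.StorageMiner,
	xsis []builtin.ExtendedSectorInfo, rand abi.PoStRandomness,
	epoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) {
	return m.ComputeProof(ctx, xsis, rand, epoch, nv)
}
```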
@@ -39,6 +39,7 @@ type Worker interface {
	SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
	SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
	FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
+	FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
	ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
	ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
	ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
@@ -300,6 +300,34 @@ func init() {
		Error: "<error>",
	})
	addExample(storiface.ResourceTable)
	addExample(network.ScopeStat{
		Memory:             123,
		NumStreamsInbound:  1,
		NumStreamsOutbound: 2,
		NumConnsInbound:    3,
		NumConnsOutbound:   4,
		NumFD:              5,
	})
	addExample(map[string]network.ScopeStat{
		"abc": {
			Memory:             123,
			NumStreamsInbound:  1,
			NumStreamsOutbound: 2,
			NumConnsInbound:    3,
			NumConnsOutbound:   4,
			NumFD:              5,
		}})
	addExample(api.NetLimit{
		Memory:          123,
		StreamsInbound:  1,
		StreamsOutbound: 2,
		Streams:         3,
		ConnsInbound:    3,
		ConnsOutbound:   4,
		Conns:           4,
		FD:              5,
	})

}

func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
@@ -1811,6 +1811,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
}

// NetLimit mocks base method.
func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetLimit", arg0, arg1)
	ret0, _ := ret[0].(api.NetLimit)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// NetLimit indicates an expected call of NetLimit.
func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1)
}

// NetPeerInfo mocks base method.
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
	m.ctrl.T.Helper()
@@ -1856,6 +1871,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
}

// NetSetLimit mocks base method.
func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// NetSetLimit indicates an expected call of NetSetLimit.
func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2)
}

// NetStat mocks base method.
func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetStat", arg0, arg1)
	ret0, _ := ret[0].(api.NetStat)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// NetStat indicates an expected call of NetStat.
func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1)
}

// NodeStatus mocks base method.
func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) {
	m.ctrl.T.Helper()
api/proxy_gen.go: 121 changes
@@ -17,6 +17,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/dline"
+	abinetwork "github.com/filecoin-project/go-state-types/network"
	apitypes "github.com/filecoin-project/lotus/api/types"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -515,6 +516,8 @@ type GatewayStruct struct {

	MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``

+	MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) ``
+
	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``

	StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``
@@ -586,11 +589,17 @@ type NetStruct struct {

	NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`

+	NetLimit func(p0 context.Context, p1 string) (NetLimit, error) `perm:"read"`
+
	NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`

	NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`

	NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
+
+	NetSetLimit func(p0 context.Context, p1 string, p2 NetLimit) error `perm:"admin"`
+
+	NetStat func(p0 context.Context, p1 string) (NetStat, error) `perm:"read"`
	}
}

@@ -620,7 +629,7 @@ type StorageMinerStruct struct {

	CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`

-	ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"`
+	ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`

	CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`

@@ -716,6 +725,8 @@ type StorageMinerStruct struct {

	ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`

+	ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
+
	ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`

	ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
@@ -748,6 +759,8 @@ type StorageMinerStruct struct {

	SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`

+	SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+
	SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`

	SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@@ -758,7 +771,9 @@ type StorageMinerStruct struct {

	SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`

-	SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
+	SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber, p2 bool) error `perm:"admin"`
+
+	SectorMatchPendingPiecesToOpenSectors func(p0 context.Context) error `perm:"admin"`

	SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"`

@@ -863,6 +878,8 @@ type WorkerStruct struct {

	Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`

+	FinalizeReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
+
	FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`

	GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
@@ -3270,6 +3287,17 @@ func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) {
	return *new(types.BigInt), ErrNotSupported
}

func (s *GatewayStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
	if s.Internal.MsigGetVestingSchedule == nil {
		return *new(MsigVesting), ErrNotSupported
	}
	return s.Internal.MsigGetVestingSchedule(p0, p1, p2)
}

func (s *GatewayStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) {
	return *new(MsigVesting), ErrNotSupported
}

func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
	if s.Internal.StateAccountKey == nil {
		return *new(address.Address), ErrNotSupported
@@ -3622,6 +3650,17 @@ func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
	return *new(peer.AddrInfo), ErrNotSupported
}

func (s *NetStruct) NetLimit(p0 context.Context, p1 string) (NetLimit, error) {
	if s.Internal.NetLimit == nil {
		return *new(NetLimit), ErrNotSupported
	}
	return s.Internal.NetLimit(p0, p1)
}

func (s *NetStub) NetLimit(p0 context.Context, p1 string) (NetLimit, error) {
	return *new(NetLimit), ErrNotSupported
}

func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
	if s.Internal.NetPeerInfo == nil {
		return nil, ErrNotSupported
@@ -3655,6 +3694,28 @@ func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
	return *new([]PubsubScore), ErrNotSupported
}

func (s *NetStruct) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error {
	if s.Internal.NetSetLimit == nil {
		return ErrNotSupported
	}
	return s.Internal.NetSetLimit(p0, p1, p2)
}

func (s *NetStub) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error {
	return ErrNotSupported
}

func (s *NetStruct) NetStat(p0 context.Context, p1 string) (NetStat, error) {
	if s.Internal.NetStat == nil {
		return *new(NetStat), ErrNotSupported
	}
	return s.Internal.NetStat(p0, p1)
}

func (s *NetStub) NetStat(p0 context.Context, p1 string) (NetStat, error) {
	return *new(NetStat), ErrNotSupported
}

func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error {
	if s.Internal.Sign == nil {
		return ErrNotSupported
@@ -3710,14 +3771,14 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
	return *new(map[abi.SectorNumber]string), ErrNotSupported
}

-func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
+func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
	if s.Internal.ComputeProof == nil {
		return *new([]builtin.PoStProof), ErrNotSupported
	}
-	return s.Internal.ComputeProof(p0, p1, p2)
+	return s.Internal.ComputeProof(p0, p1, p2, p3, p4)
}

-func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) {
+func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
	return *new([]builtin.PoStProof), ErrNotSupported
}

@@ -4238,6 +4299,17 @@ func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
	return ErrNotSupported
}

func (s *StorageMinerStruct) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
	if s.Internal.ReturnFinalizeReplicaUpdate == nil {
		return ErrNotSupported
	}
	return s.Internal.ReturnFinalizeReplicaUpdate(p0, p1, p2)
}

func (s *StorageMinerStub) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
	return ErrNotSupported
}

func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
	if s.Internal.ReturnFinalizeSector == nil {
		return ErrNotSupported
@@ -4414,6 +4486,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) {
	return nil, ErrNotSupported
}

func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
	if s.Internal.SectorAbortUpgrade == nil {
		return ErrNotSupported
	}
	return s.Internal.SectorAbortUpgrade(p0, p1)
}

func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
	return ErrNotSupported
}

func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) {
	if s.Internal.SectorAddPieceToAny == nil {
		return *new(SectorOffset), ErrNotSupported
@@ -4469,14 +4552,25 @@ func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration, error) {
	return *new(time.Duration), ErrNotSupported
}

-func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
	if s.Internal.SectorMarkForUpgrade == nil {
		return ErrNotSupported
	}
-	return s.Internal.SectorMarkForUpgrade(p0, p1)
+	return s.Internal.SectorMarkForUpgrade(p0, p1, p2)
}

-func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error {
+func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error {
	return ErrNotSupported
}

func (s *StorageMinerStruct) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
	if s.Internal.SectorMatchPendingPiecesToOpenSectors == nil {
		return ErrNotSupported
	}
	return s.Internal.SectorMatchPendingPiecesToOpenSectors(p0)
}

func (s *StorageMinerStub) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error {
	return ErrNotSupported
}

@@ -4953,6 +5047,17 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) {
	return *new(storiface.CallID), ErrNotSupported
}

func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
	if s.Internal.FinalizeReplicaUpdate == nil {
		return *new(storiface.CallID), ErrNotSupported
	}
	return s.Internal.FinalizeReplicaUpdate(p0, p1, p2)
}

func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
	return *new(storiface.CallID), ErrNotSupported
}

func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
	if s.Internal.FinalizeSector == nil {
		return *new(storiface.CallID), ErrNotSupported
api/types.go: 23 changes
@@ -12,6 +12,7 @@ import (
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-graphsync"

+	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	ma "github.com/multiformats/go-multiaddr"
@@ -129,6 +130,28 @@ type NetBlockList struct {
	IPSubnets []string
}

type NetStat struct {
	System    *network.ScopeStat           `json:",omitempty"`
	Transient *network.ScopeStat           `json:",omitempty"`
	Services  map[string]network.ScopeStat `json:",omitempty"`
	Protocols map[string]network.ScopeStat `json:",omitempty"`
	Peers     map[string]network.ScopeStat `json:",omitempty"`
}

type NetLimit struct {
	Dynamic bool `json:",omitempty"`
	// set if Dynamic is false
	Memory int64 `json:",omitempty"`
	// set if Dynamic is true
	MemoryFraction float64 `json:",omitempty"`
	MinMemory      int64   `json:",omitempty"`
	MaxMemory      int64   `json:",omitempty"`

	Streams, StreamsInbound, StreamsOutbound int
	Conns, ConnsInbound, ConnsOutbound       int
	FD                                       int
}

type ExtendedPeerInfo struct {
	ID    peer.ID
	Agent string
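To show how the `NetLimit` struct is meant to be populated, here is a hedged sketch (not part of this diff) of setting and reading back a static limit via the admin endpoint. The `node` handle is assumed to be an authenticated client; the numbers are illustrative, not recommendations.

```go
package main

import (
	"context"
	"log"

	lotusapi "github.com/filecoin-project/lotus/api"
)

// raiseSystemLimits bumps static limits for the "system" scope. Leaving
// Dynamic as false means Memory (not MemoryFraction/MinMemory/MaxMemory)
// is honoured, per the field comments in api/types.go.
func raiseSystemLimits(ctx context.Context, node lotusapi.Net) error {
	limit := lotusapi.NetLimit{
		Memory:  1 << 30, // 1 GiB reserved for the scope
		Streams: 4096, StreamsInbound: 2048, StreamsOutbound: 4096,
		Conns: 1024, ConnsInbound: 512, ConnsOutbound: 1024,
		FD: 1024,
	}
	if err := node.NetSetLimit(ctx, "system", limit); err != nil { // perm:admin
		return err
	}
	got, err := node.NetLimit(ctx, "system") // perm:read
	if err != nil {
		return err
	}
	log.Printf("system scope limit now: %+v", got)
	return nil
}
```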
@@ -8,7 +8,7 @@ import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
-	"github.com/filecoin-project/go-state-types/network"
+	abinetwork "github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -57,7 +57,7 @@ type Gateway interface {
	StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
	StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
-	StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
+	StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
	StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
	StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
	StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
@@ -13,7 +13,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/dline"
-	"github.com/filecoin-project/go-state-types/network"
+	abinetwork "github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/api"
	apitypes "github.com/filecoin-project/lotus/api/types"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -451,7 +451,7 @@ type GatewayStruct struct {

	StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``

-	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) ``
+	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``

	StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``

@@ -2703,15 +2703,15 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) {
	return nil, ErrNotSupported
}

-func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
+func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
	if s.Internal.StateNetworkVersion == nil {
-		return *new(network.Version), ErrNotSupported
+		return *new(abinetwork.Version), ErrNotSupported
	}
	return s.Internal.StateNetworkVersion(p0, p1)
}

-func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) {
-	return *new(network.Version), ErrNotSupported
+func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
+	return *new(abinetwork.Version), ErrNotSupported
}

func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
@@ -1724,6 +1724,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1)
}

// NetLimit mocks base method.
func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetLimit", arg0, arg1)
	ret0, _ := ret[0].(api.NetLimit)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// NetLimit indicates an expected call of NetLimit.
func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1)
}

// NetPeerInfo mocks base method.
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
	m.ctrl.T.Helper()
@@ -1769,6 +1784,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0)
}

// NetSetLimit mocks base method.
func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// NetSetLimit indicates an expected call of NetSetLimit.
func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2)
}

// NetStat mocks base method.
func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "NetStat", arg0, arg1)
	ret0, _ := ret[0].(api.NetStat)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// NetStat indicates an expected call of NetStat.
func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1)
}

// PaychAllocateLane mocks base method.
func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) {
	m.ctrl.T.Helper()
@@ -54,10 +54,10 @@ func VersionForType(nodeType NodeType) (Version, error) {

// semver versions of the rpc api exposed
var (
-	FullAPIVersion0 = newVer(1, 4, 0)
-	FullAPIVersion1 = newVer(2, 1, 0)
+	FullAPIVersion0 = newVer(1, 5, 0)
+	FullAPIVersion1 = newVer(2, 2, 0)

-	MinerAPIVersion0 = newVer(1, 2, 0)
+	MinerAPIVersion0  = newVer(1, 3, 0)
	WorkerAPIVersion0 = newVer(1, 5, 0)
)
blockstore/autobatch.go: new file, 262 lines
@@ -0,0 +1,262 @@
package blockstore

import (
	"context"
	"sync"
	"time"

	"golang.org/x/xerrors"

	block "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// autolog is a logger for the autobatching blockstore. It is subscoped from the
// blockstore logger.
var autolog = log.Named("auto")

// contains the same set of blocks twice, once as an ordered list for flushing, and as a map for fast access
type blockBatch struct {
	blockList []block.Block
	blockMap  map[cid.Cid]block.Block
}

type AutobatchBlockstore struct {
	// TODO: drop if memory consumption is too high
	addedCids map[cid.Cid]struct{}

	stateLock     sync.Mutex
	bufferedBatch blockBatch

	flushingBatch blockBatch
	flushErr      error

	flushCh chan struct{}

	doFlushLock     sync.Mutex
	flushRetryDelay time.Duration
	doneCh          chan struct{}
	shutdown        context.CancelFunc

	backingBs Blockstore

	bufferCapacity int
	bufferSize     int
}

func NewAutobatch(ctx context.Context, backingBs Blockstore, bufferCapacity int) *AutobatchBlockstore {
	ctx, cancel := context.WithCancel(ctx)
	bs := &AutobatchBlockstore{
		addedCids:      make(map[cid.Cid]struct{}),
		backingBs:      backingBs,
		bufferCapacity: bufferCapacity,
		flushCh:        make(chan struct{}, 1),
		doneCh:         make(chan struct{}),
		// could be made configurable
		flushRetryDelay: time.Millisecond * 100,
		shutdown:        cancel,
	}

	bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block)

	go bs.flushWorker(ctx)

	return bs
}

func (bs *AutobatchBlockstore) Put(ctx context.Context, blk block.Block) error {
	bs.stateLock.Lock()
	defer bs.stateLock.Unlock()

	_, ok := bs.addedCids[blk.Cid()]
	if !ok {
		bs.addedCids[blk.Cid()] = struct{}{}
		bs.bufferedBatch.blockList = append(bs.bufferedBatch.blockList, blk)
		bs.bufferedBatch.blockMap[blk.Cid()] = blk
		bs.bufferSize += len(blk.RawData())
		if bs.bufferSize >= bs.bufferCapacity {
			// signal that a flush is appropriate, may be ignored
			select {
			case bs.flushCh <- struct{}{}:
			default:
				// do nothing
			}
		}
	}

	return nil
}

func (bs *AutobatchBlockstore) flushWorker(ctx context.Context) {
	defer close(bs.doneCh)
	for {
		select {
		case <-bs.flushCh:
			// TODO: check if we _should_ actually flush. We could get a spurious wakeup
			// here.
			putErr := bs.doFlush(ctx, false)
			for putErr != nil {
				select {
				case <-ctx.Done():
					return
				case <-time.After(bs.flushRetryDelay):
					autolog.Errorf("FLUSH ERRORED: %w, retrying after %v", putErr, bs.flushRetryDelay)
					putErr = bs.doFlush(ctx, true)
				}
			}
		case <-ctx.Done():
			// Do one last flush.
			_ = bs.doFlush(ctx, false)
			return
		}
	}
}

// caller must NOT hold stateLock
// set retryOnly to true to only retry a failed flush and not flush anything new.
func (bs *AutobatchBlockstore) doFlush(ctx context.Context, retryOnly bool) error {
	bs.doFlushLock.Lock()
	defer bs.doFlushLock.Unlock()

	// If we failed to flush last time, try flushing again.
	if bs.flushErr != nil {
		bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)
	}

	// If we failed, or we're _only_ retrying, bail.
	if retryOnly || bs.flushErr != nil {
		return bs.flushErr
	}

	// Then take the current batch...
	bs.stateLock.Lock()
	// We do NOT clear addedCids here, because its purpose is to expedite Puts
	bs.flushingBatch = bs.bufferedBatch
	bs.bufferedBatch.blockList = make([]block.Block, 0, len(bs.flushingBatch.blockList))
	bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block, len(bs.flushingBatch.blockMap))
	bs.stateLock.Unlock()

	// And try to flush it.
	bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)

	// If we succeeded, reset the batch. Otherwise, we'll try again next time.
	if bs.flushErr == nil {
		bs.stateLock.Lock()
		bs.flushingBatch = blockBatch{}
		bs.stateLock.Unlock()
	}

	return bs.flushErr
}

// caller must NOT hold stateLock
func (bs *AutobatchBlockstore) Flush(ctx context.Context) error {
	return bs.doFlush(ctx, false)
}

func (bs *AutobatchBlockstore) Shutdown(ctx context.Context) error {
	// TODO: Prevent puts after we call this to avoid losing data.
	bs.shutdown()
	select {
	case <-bs.doneCh:
	case <-ctx.Done():
		return ctx.Err()
	}

	bs.doFlushLock.Lock()
	defer bs.doFlushLock.Unlock()

	return bs.flushErr
}
func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
	// may seem backward to check the backingBs first, but that is the likeliest case
	blk, err := bs.backingBs.Get(ctx, c)
	if err == nil {
		return blk, nil
	}

	if err != ErrNotFound {
		return blk, err
	}

	bs.stateLock.Lock()
	defer bs.stateLock.Unlock()
	v, ok := bs.flushingBatch.blockMap[c]
	if ok {
		return v, nil
	}

	v, ok = bs.bufferedBatch.blockMap[c]
	if ok {
		return v, nil
	}

	// Recheck the backing store: a flush may have landed the block there
	// between the first check and taking stateLock. (The line as committed
	// recursed into bs.Get here, which would deadlock on stateLock; calling
	// the backing store directly is the intended fallthrough.)
	return bs.backingBs.Get(ctx, c)
}
func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error {
	// if we wanted to support this, we would have to:
	// - flush
	// - delete from the backingBs (if present)
	// - remove from addedCids (if present)
	// - if present in addedCids, also walk the ordered lists and remove if present
	return xerrors.New("deletion is unsupported")
}

func (bs *AutobatchBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
	// see note in DeleteBlock()
	return xerrors.New("deletion is unsupported")
}

func (bs *AutobatchBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
	_, err := bs.Get(ctx, c)
	if err == nil {
		return true, nil
	}
	if err == ErrNotFound {
		return false, nil
	}

	return false, err
}

func (bs *AutobatchBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
	blk, err := bs.Get(ctx, c)
	if err != nil {
		return 0, err
	}

	return len(blk.RawData()), nil
}

func (bs *AutobatchBlockstore) PutMany(ctx context.Context, blks []block.Block) error {
	for _, blk := range blks {
		if err := bs.Put(ctx, blk); err != nil {
			return err
		}
	}

	return nil
}

func (bs *AutobatchBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	if err := bs.Flush(ctx); err != nil {
		return nil, err
	}

	return bs.backingBs.AllKeysChan(ctx)
}

func (bs *AutobatchBlockstore) HashOnRead(enabled bool) {
	bs.backingBs.HashOnRead(enabled)
}

func (bs *AutobatchBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
	blk, err := bs.Get(ctx, cid)
	if err != nil {
		return err
	}

	return callback(blk.RawData())
}
blockstore/autobatch_test.go: new file, 34 lines
@@ -0,0 +1,34 @@
package blockstore

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAutobatchBlockstore(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ab := NewAutobatch(ctx, NewMemory(), len(b0.RawData())+len(b1.RawData())-1)

	require.NoError(t, ab.Put(ctx, b0))
	require.NoError(t, ab.Put(ctx, b1))
	require.NoError(t, ab.Put(ctx, b2))

	v0, err := ab.Get(ctx, b0.Cid())
	require.NoError(t, err)
	require.Equal(t, b0.RawData(), v0.RawData())

	v1, err := ab.Get(ctx, b1.Cid())
	require.NoError(t, err)
	require.Equal(t, b1.RawData(), v1.RawData())

	v2, err := ab.Get(ctx, b2.Cid())
	require.NoError(t, err)
	require.Equal(t, b2.RawData(), v2.RawData())

	require.NoError(t, ab.Flush(ctx))
	require.NoError(t, ab.Shutdown(ctx))
}
blockstore/context.go: new file, 21 lines
@@ -0,0 +1,21 @@
package blockstore

import (
	"context"
)

type hotViewKey struct{}

var hotView = hotViewKey{}

// WithHotView constructs a new context with an option that provides a hint to the blockstore
// (e.g. the splitstore) that the object (and its ipld references) should be kept hot.
func WithHotView(ctx context.Context) context.Context {
	return context.WithValue(ctx, hotView, struct{}{})
}

// IsHotView returns true if the hot view option is set in the context
func IsHotView(ctx context.Context) bool {
	v := ctx.Value(hotView)
	return v != nil
}
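A minimal, self-contained usage sketch (not part of this diff): callers that know an object should stay in the hotstore tag the context before reading through a context-aware blockstore.

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	// Tag a context so a context-aware blockstore (e.g. the splitstore)
	// can keep objects read through it, and their IPLD references, hot.
	ctx := blockstore.WithHotView(context.Background())
	fmt.Println(blockstore.IsHotView(ctx))                  // true
	fmt.Println(blockstore.IsHotView(context.Background())) // false
}
```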
@@ -49,10 +49,11 @@ These are options in the `[Chainstore.Splitstore]` section of the configuration:
   blockstore and discards writes; this is necessary to support syncing from a snapshot.
 - `MarkSetType` -- specifies the type of markset to use during compaction.
   The markset is the data structure used by compaction/gc to track live objects.
-  The default value is `"map"`, which will use an in-memory map; if you are limited
-  in memory (or indeed see compaction run out of memory), you can also specify
-  `"badger"` which will use a disk-backed markset, using badger. This will use
-  much less memory, but will also make compaction slower.
+  The default value is "badger", which will use a disk-backed markset using badger.
+  If you have a lot of memory (48G or more) you can also use "map", which will use
+  an in-memory markset, speeding up compaction at the cost of higher memory usage.
+  Note: If you are using a VPS with a network volume, you need to provision at least
+  3000 IOPs with the badger markset. (A config sketch follows this list.)
 - `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4
   finalities maintained by default, to maintain messages and message receipts in the
   hotstore. This is useful for assistive nodes that want to support syncing for other
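To make the options above concrete, here is a hedged sketch of the corresponding `config.toml` fragment. Only `MarkSetType` and `HotStoreMessageRetention` are taken from the text above; `EnableSplitstore` is an assumption about the surrounding config, and the values are illustrative, not recommendations.

```toml
# Hypothetical excerpt of a Lotus node config; only the options discussed
# above are shown.
[Chainstore]
  EnableSplitstore = true

  [Chainstore.Splitstore]
    # "badger" (default): disk-backed markset, low memory footprint.
    # "map": in-memory markset, faster compaction if you have 48G+ of RAM.
    MarkSetType = "badger"

    # Extra finalities of messages/receipts to retain in the hotstore,
    # beyond the default 4.
    HotStoreMessageRetention = 24
```

The design trade-off is the one the README states: badger trades some compaction speed for a much smaller memory footprint.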
@@ -105,6 +106,12 @@ Compaction works transactionally with the following algorithm:
 - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
 - We then end the transaction and compact/gc the hotstore.

+As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been
+modified to eliminate sorting and maintain the cold object set on disk. This drastically reduces
+memory usage; in fact, when using badger as the markset, compaction uses very little memory, and
+it should now be possible to run splitstore with 32GB of RAM or less without danger of running out of
+memory during compaction.
+
 ## Garbage Collection

 TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
blockstore/splitstore/checkpoint.go: new file, 118 lines
@@ -0,0 +1,118 @@
package splitstore

import (
	"bufio"
	"io"
	"os"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

type Checkpoint struct {
	file *os.File
	buf  *bufio.Writer
}

func NewCheckpoint(path string) (*Checkpoint, error) {
	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		return nil, xerrors.Errorf("error creating checkpoint: %w", err)
	}
	buf := bufio.NewWriter(file)

	return &Checkpoint{
		file: file,
		buf:  buf,
	}, nil
}

func OpenCheckpoint(path string) (*Checkpoint, cid.Cid, error) {
	filein, err := os.Open(path)
	if err != nil {
		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for reading: %w", err)
	}
	defer filein.Close() //nolint:errcheck

	bufin := bufio.NewReader(filein)
	start, err := readRawCid(bufin, nil)
	if err != nil && err != io.EOF {
		return nil, cid.Undef, xerrors.Errorf("error reading cid from checkpoint: %w", err)
	}

	fileout, err := os.OpenFile(path, os.O_WRONLY|os.O_SYNC, 0644)
	if err != nil {
		return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for writing: %w", err)
	}
	bufout := bufio.NewWriter(fileout)

	return &Checkpoint{
		file: fileout,
		buf:  bufout,
	}, start, nil
}

func (cp *Checkpoint) Set(c cid.Cid) error {
	if _, err := cp.file.Seek(0, io.SeekStart); err != nil {
		return xerrors.Errorf("error seeking beginning of checkpoint: %w", err)
	}

	if err := writeRawCid(cp.buf, c, true); err != nil {
		return xerrors.Errorf("error writing cid to checkpoint: %w", err)
	}

	return nil
}

func (cp *Checkpoint) Close() error {
	if cp.file == nil {
		return nil
	}

	err := cp.file.Close()
	cp.file = nil
	cp.buf = nil

	return err
}

func readRawCid(buf *bufio.Reader, hbuf []byte) (cid.Cid, error) {
	sz, err := buf.ReadByte()
	if err != nil {
		return cid.Undef, err // don't wrap EOF as it is not an error here
	}

	if hbuf == nil {
		hbuf = make([]byte, int(sz))
	} else {
		hbuf = hbuf[:int(sz)]
	}

	if _, err := io.ReadFull(buf, hbuf); err != nil {
		return cid.Undef, xerrors.Errorf("error reading hash: %w", err) // wrap EOF, it's corrupt
	}

	hash, err := mh.Cast(hbuf)
	if err != nil {
		return cid.Undef, xerrors.Errorf("error casting multihash: %w", err)
	}

	return cid.NewCidV1(cid.Raw, hash), nil
}

func writeRawCid(buf *bufio.Writer, c cid.Cid, flush bool) error {
	hash := c.Hash()
	if err := buf.WriteByte(byte(len(hash))); err != nil {
		return err
	}
	if _, err := buf.Write(hash); err != nil {
		return err
	}
	if flush {
		return buf.Flush()
	}

	return nil
}
blockstore/splitstore/checkpoint_test.go: new file, 147 lines
@@ -0,0 +1,147 @@
package splitstore

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

func TestCheckpoint(t *testing.T) {
	dir, err := ioutil.TempDir("", "checkpoint.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(dir)
	})

	path := filepath.Join(dir, "checkpoint")

	makeCid := func(key string) cid.Cid {
		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	k1 := makeCid("a")
	k2 := makeCid("b")
	k3 := makeCid("c")
	k4 := makeCid("d")

	cp, err := NewCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}

	if err := cp.Set(k1); err != nil {
		t.Fatal(err)
	}
	if err := cp.Set(k2); err != nil {
		t.Fatal(err)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	cp, start, err := OpenCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}
	if !start.Equals(k2) {
		t.Fatalf("expected start to be %s; got %s", k2, start)
	}

	if err := cp.Set(k3); err != nil {
		t.Fatal(err)
	}
	if err := cp.Set(k4); err != nil {
		t.Fatal(err)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	cp, start, err = OpenCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}
	if !start.Equals(k4) {
		t.Fatalf("expected start to be %s; got %s", k4, start)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	// also test correct operation with an empty checkpoint
	cp, err = NewCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	cp, start, err = OpenCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}

	if start.Defined() {
		t.Fatal("expected start to be undefined")
	}

	if err := cp.Set(k1); err != nil {
		t.Fatal(err)
	}
	if err := cp.Set(k2); err != nil {
		t.Fatal(err)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	cp, start, err = OpenCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}
	if !start.Equals(k2) {
		t.Fatalf("expected start to be %s; got %s", k2, start)
	}

	if err := cp.Set(k3); err != nil {
		t.Fatal(err)
	}
	if err := cp.Set(k4); err != nil {
		t.Fatal(err)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}

	cp, start, err = OpenCheckpoint(path)
	if err != nil {
		t.Fatal(err)
	}
	if !start.Equals(k4) {
		t.Fatalf("expected start to be %s; got %s", k4, start)
	}

	if err := cp.Close(); err != nil {
		t.Fatal(err)
	}
}
102
blockstore/splitstore/coldset.go
Normal file
@ -0,0 +1,102 @@
package splitstore

import (
	"bufio"
	"io"
	"os"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
)

type ColdSetWriter struct {
	file *os.File
	buf  *bufio.Writer
}

type ColdSetReader struct {
	file *os.File
	buf  *bufio.Reader
}

func NewColdSetWriter(path string) (*ColdSetWriter, error) {
	file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return nil, xerrors.Errorf("error creating coldset: %w", err)
	}
	buf := bufio.NewWriter(file)

	return &ColdSetWriter{
		file: file,
		buf:  buf,
	}, nil
}

func NewColdSetReader(path string) (*ColdSetReader, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, xerrors.Errorf("error opening coldset: %w", err)
	}
	buf := bufio.NewReader(file)

	return &ColdSetReader{
		file: file,
		buf:  buf,
	}, nil
}

func (s *ColdSetWriter) Write(c cid.Cid) error {
	return writeRawCid(s.buf, c, false)
}

func (s *ColdSetWriter) Close() error {
	if s.file == nil {
		return nil
	}

	err1 := s.buf.Flush()
	err2 := s.file.Close()
	s.buf = nil
	s.file = nil

	if err1 != nil {
		return err1
	}
	return err2
}

func (s *ColdSetReader) ForEach(f func(cid.Cid) error) error {
	hbuf := make([]byte, 256)
	for {
		next, err := readRawCid(s.buf, hbuf)
		if err != nil {
			if err == io.EOF {
				return nil
			}

			return xerrors.Errorf("error reading coldset: %w", err)
		}

		if err := f(next); err != nil {
			return err
		}
	}
}

func (s *ColdSetReader) Reset() error {
	_, err := s.file.Seek(0, io.SeekStart)
	return err
}

func (s *ColdSetReader) Close() error {
	if s.file == nil {
		return nil
	}

	err := s.file.Close()
	s.file = nil
	s.buf = nil

	return err
}
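The writer/reader split above exists so the compactor can stream the cold set to disk once and replay it several times. A minimal sketch of that flow (the import path and demo values are assumptions, not part of the diff):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"

	splitstore "github.com/filecoin-project/lotus/blockstore/splitstore"
)

func main() {
	dir, _ := os.MkdirTemp("", "coldset-demo")
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "coldset")

	cw, err := splitstore.NewColdSetWriter(path)
	if err != nil {
		panic(err)
	}
	for _, key := range []string{"a", "b", "c"} {
		h, _ := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err := cw.Write(cid.NewCidV1(cid.Raw, h)); err != nil {
			panic(err)
		}
	}
	// Close flushes the buffered writes to disk
	if err := cw.Close(); err != nil {
		panic(err)
	}

	cr, err := splitstore.NewColdSetReader(path)
	if err != nil {
		panic(err)
	}
	defer cr.Close()

	// first pass (e.g. moving blocks), then Reset and a second pass (purging)
	for pass := 0; pass < 2; pass++ {
		_ = cr.ForEach(func(c cid.Cid) error {
			fmt.Println(pass, c)
			return nil
		})
		_ = cr.Reset()
	}
}
```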
99
blockstore/splitstore/coldset_test.go
Normal file
@ -0,0 +1,99 @@
package splitstore

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

func TestColdSet(t *testing.T) {
	dir, err := ioutil.TempDir("", "coldset.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(dir)
	})

	path := filepath.Join(dir, "coldset")

	makeCid := func(i int) cid.Cid {
		h, err := multihash.Sum([]byte(fmt.Sprintf("cid.%d", i)), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	const count = 1000
	cids := make([]cid.Cid, 0, count)
	for i := 0; i < count; i++ {
		cids = append(cids, makeCid(i))
	}

	cw, err := NewColdSetWriter(path)
	if err != nil {
		t.Fatal(err)
	}

	for _, c := range cids {
		if err := cw.Write(c); err != nil {
			t.Fatal(err)
		}
	}

	if err := cw.Close(); err != nil {
		t.Fatal(err)
	}

	cr, err := NewColdSetReader(path)
	if err != nil {
		t.Fatal(err)
	}

	index := 0
	err = cr.ForEach(func(c cid.Cid) error {
		if index >= count {
			t.Fatal("too many cids")
		}

		if !c.Equals(cids[index]) {
			t.Fatalf("wrong cid %d; expected %s but got %s", index, cids[index], c)
		}

		index++
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	if err := cr.Reset(); err != nil {
		t.Fatal(err)
	}

	index = 0
	err = cr.ForEach(func(c cid.Cid) error {
		if index >= count {
			t.Fatal("too many cids")
		}

		if !c.Equals(cids[index]) {
			t.Fatalf("wrong cid; expected %s but got %s", cids[index], c)
		}

		index++
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

}
@ -10,39 +10,36 @@ import (

var errMarkSetClosed = errors.New("markset closed")

// MarkSet is a utility to keep track of seen CID, and later query for them.
//
// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
// * If a probabilistic result is acceptable, it can be backed by a bloom filter
// MarkSet is an interface for tracking CIDs during chain and object walks
type MarkSet interface {
	ObjectVisitor
	Mark(cid.Cid) error
	MarkMany([]cid.Cid) error
	Has(cid.Cid) (bool, error)
	Close() error
	SetConcurrent()
}

type MarkSetVisitor interface {
	MarkSet
	ObjectVisitor
	// BeginCriticalSection ensures that the markset is persisted to disk for recovery in case
	// of abnormal termination during the critical section span.
	BeginCriticalSection() error
	// EndCriticalSection ends the critical section span.
	EndCriticalSection()
}

type MarkSetEnv interface {
	// Create creates a new markset within the environment.
	// name is a unique name for this markset, mapped to the filesystem in disk-backed environments
	// New creates a new markset within the environment.
	// name is a unique name for this markset, mapped to the filesystem for on-disk persistence.
	// sizeHint is a hint about the expected size of the markset
	Create(name string, sizeHint int64) (MarkSet, error)
	// CreateVisitor is like Create, but returns a wider interface that supports atomic visits.
	// It may not be supported by some markset types (e.g. bloom).
	CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error)
	// SupportsVisitor returns true if the marksets created by this environment support the visitor interface.
	SupportsVisitor() bool
	New(name string, sizeHint int64) (MarkSet, error)
	// Recover recovers an existing markset persisted on-disk.
	Recover(name string) (MarkSet, error)
	// Close closes the markset
	Close() error
}

func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
	switch mtype {
	case "map":
		return NewMapMarkSetEnv()
		return NewMapMarkSetEnv(path)
	case "badger":
		return NewBadgerMarkSetEnv(path)
	default:

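The interface change above collapses Create/CreateVisitor/SupportsVisitor into a single New, and adds Recover for crash recovery. A sketch of the intended lifecycle, assuming only the interfaces defined in this hunk (the function and variable names are hypothetical):

```go
package demo

import (
	"github.com/ipfs/go-cid"

	splitstore "github.com/filecoin-project/lotus/blockstore/splitstore"
)

// markWithRecovery sketches the compaction-side lifecycle of a
// recoverable markset.
func markWithRecovery(path string, cids []cid.Cid) error {
	env, err := splitstore.OpenMarkSetEnv(path, "badger")
	if err != nil {
		return err
	}
	defer env.Close()

	ms, err := env.New("live", int64(len(cids)))
	if err != nil {
		return err
	}

	// marks made before the critical section are buffered, not durable
	if err := ms.MarkMany(cids); err != nil {
		return err
	}

	// from here on every mark is synced to disk, so an abnormal
	// termination can be recovered
	if err := ms.BeginCriticalSection(); err != nil {
		return err
	}
	// ... purge work happens here; after a crash, a restart would call
	// env.Recover("live") instead of env.New ...
	ms.EndCriticalSection()

	return ms.Close()
}
```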
@ -3,6 +3,7 @@ package splitstore

import (
	"os"
	"path/filepath"
	"runtime"
	"sync"

	"golang.org/x/xerrors"
@ -28,13 +29,13 @@ type BadgerMarkSet struct {
	writers int
	seqno   int
	version int
	persist bool

	db   *badger.DB
	path string
}

var _ MarkSet = (*BadgerMarkSet)(nil)
var _ MarkSetVisitor = (*BadgerMarkSet)(nil)

var badgerMarkSetBatchSize = 16384

@ -48,11 +49,10 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) {
	return &BadgerMarkSetEnv{path: msPath}, nil
}

func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, error) {
	name += ".tmp"
func (e *BadgerMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
	path := filepath.Join(e.path, name)

	db, err := openTransientBadgerDB(path)
	db, err := openBadgerDB(path, false)
	if err != nil {
		return nil, xerrors.Errorf("error creating badger db: %w", err)
	}
@ -68,18 +68,72 @@ func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet,
	return ms, nil
}

func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
	return e.create(name, sizeHint)
}
func (e *BadgerMarkSetEnv) Recover(name string) (MarkSet, error) {
	path := filepath.Join(e.path, name)

func (e *BadgerMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
	return e.create(name, sizeHint)
}
	if _, err := os.Stat(path); err != nil {
		return nil, xerrors.Errorf("error stating badger db path: %w", err)
	}

func (e *BadgerMarkSetEnv) SupportsVisitor() bool { return true }
	db, err := openBadgerDB(path, true)
	if err != nil {
		return nil, xerrors.Errorf("error creating badger db: %w", err)
	}

	ms := &BadgerMarkSet{
		pend:    make(map[string]struct{}),
		writing: make(map[int]map[string]struct{}),
		db:      db,
		path:    path,
		persist: true,
	}
	ms.cond.L = &ms.mx

	return ms, nil
}

func (e *BadgerMarkSetEnv) Close() error {
	return os.RemoveAll(e.path)
	return nil
}

func (s *BadgerMarkSet) BeginCriticalSection() error {
	s.mx.Lock()

	if s.persist {
		s.mx.Unlock()
		return nil
	}

	var write bool
	var seqno int
	if len(s.pend) > 0 {
		write = true
		seqno = s.nextBatch()
	}

	s.persist = true
	s.mx.Unlock()

	if write {
		// all writes sync once persist is true
		return s.write(seqno)
	}

	// wait for any pending writes and sync
	s.mx.Lock()
	for s.writers > 0 {
		s.cond.Wait()
	}
	s.mx.Unlock()

	return s.db.Sync()
}

func (s *BadgerMarkSet) EndCriticalSection() {
	s.mx.Lock()
	defer s.mx.Unlock()

	s.persist = false
}

func (s *BadgerMarkSet) Mark(c cid.Cid) error {
@ -99,6 +153,23 @@ func (s *BadgerMarkSet) Mark(c cid.Cid) error {
	return nil
}

func (s *BadgerMarkSet) MarkMany(batch []cid.Cid) error {
	s.mx.Lock()
	if s.pend == nil {
		s.mx.Unlock()
		return errMarkSetClosed
	}

	write, seqno := s.putMany(batch)
	s.mx.Unlock()

	if write {
		return s.write(seqno)
	}

	return nil
}

func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) {
	s.mx.RLock()
	defer s.mx.RUnlock()
@ -204,16 +275,34 @@ func (s *BadgerMarkSet) tryDB(key []byte) (has bool, err error) {
// writer holds the exclusive lock
func (s *BadgerMarkSet) put(key string) (write bool, seqno int) {
	s.pend[key] = struct{}{}
	if len(s.pend) < badgerMarkSetBatchSize {
	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
		return false, 0
	}

	seqno = s.seqno
	seqno = s.nextBatch()
	return true, seqno
}

func (s *BadgerMarkSet) putMany(batch []cid.Cid) (write bool, seqno int) {
	for _, c := range batch {
		key := string(c.Hash())
		s.pend[key] = struct{}{}
	}

	if !s.persist && len(s.pend) < badgerMarkSetBatchSize {
		return false, 0
	}

	seqno = s.nextBatch()
	return true, seqno
}

func (s *BadgerMarkSet) nextBatch() int {
	seqno := s.seqno
	s.seqno++
	s.writing[seqno] = s.pend
	s.pend = make(map[string]struct{})

	return true, seqno
	return seqno
}

func (s *BadgerMarkSet) write(seqno int) (err error) {
@ -258,6 +347,14 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
		return xerrors.Errorf("error flushing batch to badger markset: %w", err)
	}

	s.mx.RLock()
	persist := s.persist
	s.mx.RUnlock()

	if persist {
		return s.db.Sync()
	}

	return nil
}

@ -277,26 +374,29 @@ func (s *BadgerMarkSet) Close() error {
	db := s.db
	s.db = nil

	return closeTransientBadgerDB(db, s.path)
	return closeBadgerDB(db, s.path, s.persist)
}

func (s *BadgerMarkSet) SetConcurrent() {}
func openBadgerDB(path string, recover bool) (*badger.DB, error) {
	// if it is not a recovery, clean up first
	if !recover {
		err := os.RemoveAll(path)
		if err != nil {
			return nil, xerrors.Errorf("error clearing markset directory: %w", err)
		}

func openTransientBadgerDB(path string) (*badger.DB, error) {
	// clean up first
	err := os.RemoveAll(path)
	if err != nil {
		return nil, xerrors.Errorf("error clearing markset directory: %w", err)
	}

	err = os.MkdirAll(path, 0755) //nolint:gosec
	if err != nil {
		return nil, xerrors.Errorf("error creating markset directory: %w", err)
		err = os.MkdirAll(path, 0755) //nolint:gosec
		if err != nil {
			return nil, xerrors.Errorf("error creating markset directory: %w", err)
		}
	}

	opts := badger.DefaultOptions(path)
	// we manually sync when we are in critical section
	opts.SyncWrites = false
	// no need to do that
	opts.CompactL0OnClose = false
	// we store hashes, not much to gain by compression
	opts.Compression = options.None
	// Note: We use FileIO for loading modes to avoid memory thrashing and interference
	// between the system blockstore and the markset.
@ -305,6 +405,15 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
	// exceeded 1GB in size.
	opts.TableLoadingMode = options.FileIO
	opts.ValueLogLoadingMode = options.FileIO
	// We increase the number of L0 tables before compaction to make it unlikely to
	// be necessary.
	opts.NumLevelZeroTables = 20      // default is 5
	opts.NumLevelZeroTablesStall = 30 // default is 10
	// increase the number of compactors from default 2 so that if we ever have to
	// compact, it is fast
	if runtime.NumCPU()/2 > opts.NumCompactors {
		opts.NumCompactors = runtime.NumCPU() / 2
	}
	opts.Logger = &badgerLogger{
		SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
		skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
@ -313,12 +422,16 @@ func openTransientBadgerDB(path string) (*badger.DB, error) {
	return badger.Open(opts)
}

func closeTransientBadgerDB(db *badger.DB, path string) error {
func closeBadgerDB(db *badger.DB, path string, persist bool) error {
	err := db.Close()
	if err != nil {
		return xerrors.Errorf("error closing badger markset: %w", err)
	}

	if persist {
		return nil
	}

	err = os.RemoveAll(path)
	if err != nil {
		return xerrors.Errorf("error deleting badger markset: %w", err)

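The badger markset's put/putMany rules above reduce to one invariant: pending keys flush when the batch fills, or on every write once BeginCriticalSection has set persist (each flush is then followed by db.Sync). A toy model of just that rule, as a sketch rather than the real type:

```go
package demo

// batcher models the flush rule used by BadgerMarkSet above (a sketch,
// not the real type): keys buffer in pend until the batch fills, unless
// persist mode forces every write through immediately.
type batcher struct {
	pend      map[string]struct{}
	persist   bool // set by BeginCriticalSection in the real code
	batchSize int
}

// put records a key and reports whether the pending set should be
// flushed to durable storage now.
func (b *batcher) put(key string) (flush bool) {
	b.pend[key] = struct{}{}
	return b.persist || len(b.pend) >= b.batchSize
}
```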
@ -1,12 +1,20 @@
package splitstore

import (
	"bufio"
	"io"
	"os"
	"path/filepath"
	"sync"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
)

type MapMarkSetEnv struct{}
type MapMarkSetEnv struct {
	path string
}

var _ MarkSetEnv = (*MapMarkSetEnv)(nil)

@ -14,55 +22,194 @@ type MapMarkSet struct {
	mx  sync.RWMutex
	set map[string]struct{}

	ts bool
	persist bool
	file    *os.File
	buf     *bufio.Writer

	path string
}

var _ MarkSet = (*MapMarkSet)(nil)
var _ MarkSetVisitor = (*MapMarkSet)(nil)

func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
	return &MapMarkSetEnv{}, nil
func NewMapMarkSetEnv(path string) (*MapMarkSetEnv, error) {
	msPath := filepath.Join(path, "markset.map")
	err := os.MkdirAll(msPath, 0755) //nolint:gosec
	if err != nil {
		return nil, xerrors.Errorf("error creating markset directory: %w", err)
	}

	return &MapMarkSetEnv{path: msPath}, nil
}

func (e *MapMarkSetEnv) create(name string, sizeHint int64) (*MapMarkSet, error) {
func (e *MapMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) {
	path := filepath.Join(e.path, name)
	return &MapMarkSet{
		set: make(map[string]struct{}, sizeHint),
		set:  make(map[string]struct{}, sizeHint),
		path: path,
	}, nil
}

func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
	return e.create(name, sizeHint)
}
func (e *MapMarkSetEnv) Recover(name string) (MarkSet, error) {
	path := filepath.Join(e.path, name)
	s := &MapMarkSet{
		set:  make(map[string]struct{}),
		path: path,
	}

func (e *MapMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) {
	return e.create(name, sizeHint)
}
	in, err := os.Open(path)
	if err != nil {
		return nil, xerrors.Errorf("error opening markset file for read: %w", err)
	}
	defer in.Close() //nolint:errcheck

func (e *MapMarkSetEnv) SupportsVisitor() bool { return true }
	// wrap a buffered reader to make this faster
	buf := bufio.NewReader(in)
	for {
		var sz byte
		if sz, err = buf.ReadByte(); err != nil {
			break
		}

		key := make([]byte, int(sz))
		if _, err = io.ReadFull(buf, key); err != nil {
			break
		}

		s.set[string(key)] = struct{}{}
	}

	if err != io.EOF {
		return nil, xerrors.Errorf("error reading markset file: %w", err)
	}

	file, err := os.OpenFile(s.path, os.O_WRONLY|os.O_APPEND, 0)
	if err != nil {
		return nil, xerrors.Errorf("error opening markset file for write: %w", err)
	}

	s.persist = true
	s.file = file
	s.buf = bufio.NewWriter(file)

	return s, nil
}

func (e *MapMarkSetEnv) Close() error {
	return nil
}

func (s *MapMarkSet) Mark(cid cid.Cid) error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}
func (s *MapMarkSet) BeginCriticalSection() error {
	s.mx.Lock()
	defer s.mx.Unlock()

	if s.set == nil {
		return errMarkSetClosed
	}

	s.set[string(cid.Hash())] = struct{}{}
	if s.persist {
		return nil
	}

	file, err := os.OpenFile(s.path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return xerrors.Errorf("error opening markset file: %w", err)
	}

	// wrap a buffered writer to make this faster
	s.buf = bufio.NewWriter(file)
	for key := range s.set {
		if err := s.writeKey([]byte(key), false); err != nil {
			_ = file.Close()
			s.buf = nil
			return err
		}
	}
	if err := s.buf.Flush(); err != nil {
		_ = file.Close()
		s.buf = nil
		return xerrors.Errorf("error flushing markset file buffer: %w", err)
	}

	s.file = file
	s.persist = true

	return nil
}

func (s *MapMarkSet) EndCriticalSection() {
	s.mx.Lock()
	defer s.mx.Unlock()

	if !s.persist {
		return
	}

	_ = s.file.Close()
	_ = os.Remove(s.path)
	s.file = nil
	s.buf = nil
	s.persist = false
}

func (s *MapMarkSet) Mark(c cid.Cid) error {
	s.mx.Lock()
	defer s.mx.Unlock()

	if s.set == nil {
		return errMarkSetClosed
	}

	hash := c.Hash()
	s.set[string(hash)] = struct{}{}

	if s.persist {
		if err := s.writeKey(hash, true); err != nil {
			return err
		}

		if err := s.file.Sync(); err != nil {
			return xerrors.Errorf("error syncing markset: %w", err)
		}
	}

	return nil
}

func (s *MapMarkSet) MarkMany(batch []cid.Cid) error {
	s.mx.Lock()
	defer s.mx.Unlock()

	if s.set == nil {
		return errMarkSetClosed
	}

	for _, c := range batch {
		hash := c.Hash()
		s.set[string(hash)] = struct{}{}

		if s.persist {
			if err := s.writeKey(hash, false); err != nil {
				return err
			}
		}
	}

	if s.persist {
		if err := s.buf.Flush(); err != nil {
			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
		}

		if err := s.file.Sync(); err != nil {
			return xerrors.Errorf("error syncing markset: %w", err)
		}
	}

	return nil
}

func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
	if s.ts {
		s.mx.RLock()
		defer s.mx.RUnlock()
	}
	s.mx.RLock()
	defer s.mx.RUnlock()

	if s.set == nil {
		return false, errMarkSetClosed
@ -73,33 +220,70 @@ func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
}

func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}
	s.mx.Lock()
	defer s.mx.Unlock()

	if s.set == nil {
		return false, errMarkSetClosed
	}

	key := string(c.Hash())
	hash := c.Hash()
	key := string(hash)
	if _, ok := s.set[key]; ok {
		return false, nil
	}

	s.set[key] = struct{}{}

	if s.persist {
		if err := s.writeKey(hash, true); err != nil {
			return false, err
		}
		if err := s.file.Sync(); err != nil {
			return false, xerrors.Errorf("error syncing markset: %w", err)
		}
	}

	return true, nil
}

func (s *MapMarkSet) Close() error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	s.mx.Lock()
	defer s.mx.Unlock()

	if s.set == nil {
		return nil
	}

	s.set = nil

	if s.file != nil {
		if err := s.file.Close(); err != nil {
			log.Warnf("error closing markset file: %s", err)
		}

		if !s.persist {
			if err := os.Remove(s.path); err != nil {
				log.Warnf("error removing markset file: %s", err)
			}
		}
	}

	return nil
}

func (s *MapMarkSet) SetConcurrent() {
	s.ts = true
func (s *MapMarkSet) writeKey(k []byte, flush bool) error {
	if err := s.buf.WriteByte(byte(len(k))); err != nil {
		return xerrors.Errorf("error writing markset key length to disk: %w", err)
	}
	if _, err := s.buf.Write(k); err != nil {
		return xerrors.Errorf("error writing markset key to disk: %w", err)
	}
	if flush {
		if err := s.buf.Flush(); err != nil {
			return xerrors.Errorf("error flushing markset buffer to disk: %w", err)
		}
	}

	return nil
}

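The map markset, the checkpoint, and the coldset all share one on-disk record format: a single length byte followed by that many raw multihash bytes, appended sequentially. A self-contained decoder for that format, mirroring the loop in Recover above (a sketch; the function name is hypothetical):

```go
package demo

import (
	"bufio"
	"io"
)

// readKeys decodes a stream of length-prefixed keys: one size byte,
// then size bytes of raw multihash, repeated until EOF.
func readKeys(r io.Reader, f func(key []byte) error) error {
	br := bufio.NewReader(r)
	for {
		sz, err := br.ReadByte()
		if err == io.EOF {
			return nil // clean end of file
		}
		if err != nil {
			return err
		}
		key := make([]byte, int(sz))
		if _, err := io.ReadFull(br, key); err != nil {
			return err // truncated record: the file is corrupt
		}
		if err := f(key); err != nil {
			return err
		}
	}
}
```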
@ -11,7 +11,10 @@ import (

func TestMapMarkSet(t *testing.T) {
	testMarkSet(t, "map")
	testMarkSetRecovery(t, "map")
	testMarkSetMarkMany(t, "map")
	testMarkSetVisitor(t, "map")
	testMarkSetVisitorRecovery(t, "map")
}

func TestBadgerMarkSet(t *testing.T) {
@ -21,12 +24,13 @@ func TestBadgerMarkSet(t *testing.T) {
		badgerMarkSetBatchSize = bs
	})
	testMarkSet(t, "badger")
	testMarkSetRecovery(t, "badger")
	testMarkSetMarkMany(t, "badger")
	testMarkSetVisitor(t, "badger")
	testMarkSetVisitorRecovery(t, "badger")
}

func testMarkSet(t *testing.T, lsType string) {
	t.Helper()

	path, err := ioutil.TempDir("", "markset.*")
	if err != nil {
		t.Fatal(err)
@ -42,12 +46,12 @@ func testMarkSet(t *testing.T, lsType string) {
	}
	defer env.Close() //nolint:errcheck

	hotSet, err := env.Create("hot", 0)
	hotSet, err := env.New("hot", 0)
	if err != nil {
		t.Fatal(err)
	}

	coldSet, err := env.Create("cold", 0)
	coldSet, err := env.New("cold", 0)
	if err != nil {
		t.Fatal(err)
	}
@ -62,6 +66,7 @@ func testMarkSet(t *testing.T, lsType string) {
	}

	mustHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
@ -73,6 +78,7 @@ func testMarkSet(t *testing.T, lsType string) {
	}

	mustNotHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
@ -114,12 +120,12 @@ func testMarkSet(t *testing.T, lsType string) {
		t.Fatal(err)
	}

	hotSet, err = env.Create("hot", 0)
	hotSet, err = env.New("hot", 0)
	if err != nil {
		t.Fatal(err)
	}

	coldSet, err = env.Create("cold", 0)
	coldSet, err = env.New("cold", 0)
	if err != nil {
		t.Fatal(err)
	}
@ -150,8 +156,6 @@ func testMarkSet(t *testing.T, lsType string) {
}

func testMarkSetVisitor(t *testing.T, lsType string) {
	t.Helper()

	path, err := ioutil.TempDir("", "markset.*")
	if err != nil {
		t.Fatal(err)
@ -167,7 +171,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
	}
	defer env.Close() //nolint:errcheck

	visitor, err := env.CreateVisitor("test", 0)
	visitor, err := env.New("test", 0)
	if err != nil {
		t.Fatal(err)
	}
@ -219,3 +223,322 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
	mustNotVisit(visitor, k3)
	mustNotVisit(visitor, k4)
}

func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
	path, err := ioutil.TempDir("", "markset.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	env, err := OpenMarkSetEnv(path, lsType)
	if err != nil {
		t.Fatal(err)
	}
	defer env.Close() //nolint:errcheck

	visitor, err := env.New("test", 0)
	if err != nil {
		t.Fatal(err)
	}
	defer visitor.Close() //nolint:errcheck

	makeCid := func(key string) cid.Cid {
		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	mustVisit := func(v ObjectVisitor, cid cid.Cid) {
		visit, err := v.Visit(cid)
		if err != nil {
			t.Fatal(err)
		}

		if !visit {
			t.Fatal("object should be visited")
		}
	}

	mustNotVisit := func(v ObjectVisitor, cid cid.Cid) {
		visit, err := v.Visit(cid)
		if err != nil {
			t.Fatal(err)
		}

		if visit {
			t.Fatal("unexpected visit")
		}
	}

	k1 := makeCid("a")
	k2 := makeCid("b")
	k3 := makeCid("c")
	k4 := makeCid("d")

	mustVisit(visitor, k1)
	mustVisit(visitor, k2)

	if err := visitor.BeginCriticalSection(); err != nil {
		t.Fatal(err)
	}

	mustVisit(visitor, k3)
	mustVisit(visitor, k4)

	mustNotVisit(visitor, k1)
	mustNotVisit(visitor, k2)
	mustNotVisit(visitor, k3)
	mustNotVisit(visitor, k4)

	if err := visitor.Close(); err != nil {
		t.Fatal(err)
	}

	visitor, err = env.Recover("test")
	if err != nil {
		t.Fatal(err)
	}

	mustNotVisit(visitor, k1)
	mustNotVisit(visitor, k2)
	mustNotVisit(visitor, k3)
	mustNotVisit(visitor, k4)

	visitor.EndCriticalSection()

	if err := visitor.Close(); err != nil {
		t.Fatal(err)
	}

	_, err = env.Recover("test")
	if err == nil {
		t.Fatal("expected recovery to fail")
	}
}

func testMarkSetRecovery(t *testing.T, lsType string) {
	path, err := ioutil.TempDir("", "markset.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	env, err := OpenMarkSetEnv(path, lsType)
	if err != nil {
		t.Fatal(err)
	}
	defer env.Close() //nolint:errcheck

	markSet, err := env.New("test", 0)
	if err != nil {
		t.Fatal(err)
	}

	makeCid := func(key string) cid.Cid {
		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	mustHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if !has {
			t.Fatal("mark not found")
		}
	}

	mustNotHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if has {
			t.Fatal("unexpected mark")
		}
	}

	k1 := makeCid("a")
	k2 := makeCid("b")
	k3 := makeCid("c")
	k4 := makeCid("d")

	if err := markSet.Mark(k1); err != nil {
		t.Fatal(err)
	}
	if err := markSet.Mark(k2); err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustNotHave(markSet, k3)
	mustNotHave(markSet, k4)

	if err := markSet.BeginCriticalSection(); err != nil {
		t.Fatal(err)
	}

	if err := markSet.Mark(k3); err != nil {
		t.Fatal(err)
	}
	if err := markSet.Mark(k4); err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustHave(markSet, k3)
	mustHave(markSet, k4)

	if err := markSet.Close(); err != nil {
		t.Fatal(err)
	}

	markSet, err = env.Recover("test")
	if err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustHave(markSet, k3)
	mustHave(markSet, k4)

	markSet.EndCriticalSection()

	if err := markSet.Close(); err != nil {
		t.Fatal(err)
	}

	_, err = env.Recover("test")
	if err == nil {
		t.Fatal("expected recovery to fail")
	}
}

func testMarkSetMarkMany(t *testing.T, lsType string) {
	path, err := ioutil.TempDir("", "markset.*")
	if err != nil {
		t.Fatal(err)
	}

	t.Cleanup(func() {
		_ = os.RemoveAll(path)
	})

	env, err := OpenMarkSetEnv(path, lsType)
	if err != nil {
		t.Fatal(err)
	}
	defer env.Close() //nolint:errcheck

	markSet, err := env.New("test", 0)
	if err != nil {
		t.Fatal(err)
	}

	makeCid := func(key string) cid.Cid {
		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	mustHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if !has {
			t.Fatal("mark not found")
		}
	}

	mustNotHave := func(s MarkSet, cid cid.Cid) {
		t.Helper()
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if has {
			t.Fatal("unexpected mark")
		}
	}

	k1 := makeCid("a")
	k2 := makeCid("b")
	k3 := makeCid("c")
	k4 := makeCid("d")

	if err := markSet.MarkMany([]cid.Cid{k1, k2}); err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustNotHave(markSet, k3)
	mustNotHave(markSet, k4)

	if err := markSet.BeginCriticalSection(); err != nil {
		t.Fatal(err)
	}

	if err := markSet.MarkMany([]cid.Cid{k3, k4}); err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustHave(markSet, k3)
	mustHave(markSet, k4)

	if err := markSet.Close(); err != nil {
		t.Fatal(err)
	}

	markSet, err = env.Recover("test")
	if err != nil {
		t.Fatal(err)
	}

	mustHave(markSet, k1)
	mustHave(markSet, k2)
	mustHave(markSet, k3)
	mustHave(markSet, k4)

	markSet.EndCriticalSection()

	if err := markSet.Close(); err != nil {
		t.Fatal(err)
	}

	_, err = env.Recover("test")
	if err == nil {
		t.Fatal("expected recovery to fail")
	}
}

@ -129,8 +129,6 @@ type SplitStore struct {

	headChangeMx sync.Mutex

	coldPurgeSize int

	chain ChainAccessor
	ds    dstore.Datastore
	cold  bstore.Blockstore
@ -158,6 +156,17 @@ type SplitStore struct {
	txnRefsMx  sync.Mutex
	txnRefs    map[cid.Cid]struct{}
	txnMissing map[cid.Cid]struct{}
	txnMarkSet MarkSet
	txnSyncMx   sync.Mutex
	txnSyncCond sync.Cond
	txnSync     bool

	// background cold object reification
	reifyWorkers    sync.WaitGroup
	reifyMx         sync.Mutex
	reifyCond       sync.Cond
	reifyPend       map[cid.Cid]struct{}
	reifyInProgress map[cid.Cid]struct{}

	// registered protectors
	protectors []func(func(cid.Cid) error) error
@ -186,10 +195,6 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
		return nil, err
	}

	if !markSetEnv.SupportsVisitor() {
		return nil, xerrors.Errorf("markset type does not support atomic visitors")
	}

	// and now we can make a SplitStore
	ss := &SplitStore{
		cfg: cfg,
@ -198,13 +203,16 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
		cold:       cold,
		hot:        hots,
		markSetEnv: markSetEnv,

		coldPurgeSize: defaultColdPurgeSize,
	}

	ss.txnViewsCond.L = &ss.txnViewsMx
	ss.txnSyncCond.L = &ss.txnSyncMx
	ss.ctx, ss.cancel = context.WithCancel(context.Background())

	ss.reifyCond.L = &ss.reifyMx
	ss.reifyPend = make(map[cid.Cid]struct{})
	ss.reifyInProgress = make(map[cid.Cid]struct{})

	if enableDebugLog {
		ss.debug, err = openDebugLog(path)
		if err != nil {
@ -212,6 +220,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
		}
	}

	if ss.checkpointExists() {
		log.Info("found compaction checkpoint; resuming compaction")
		if err := ss.completeCompaction(); err != nil {
			markSetEnv.Close() //nolint:errcheck
			return nil, xerrors.Errorf("error resuming compaction: %w", err)
		}
	}

	return ss, nil
}

@ -234,6 +250,20 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return false, err
		}

		if has {
			return s.has(cid)
		}

		return s.cold.Has(ctx, cid)
	}

	has, err := s.hot.Has(ctx, cid)

	if err != nil {
@ -245,7 +275,13 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
		return true, nil
	}

	return s.cold.Has(ctx, cid)
	has, err = s.cold.Has(ctx, cid)
	if has && bstore.IsHotView(ctx) {
		s.reifyColdObject(cid)
	}

	return has, err

}

func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
@ -261,6 +297,20 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return nil, err
		}

		if has {
			return s.get(cid)
		}

		return s.cold.Get(ctx, cid)
	}

	blk, err := s.hot.Get(ctx, cid)

	switch err {
@ -275,8 +325,11 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)

		blk, err = s.cold.Get(ctx, cid)
		if err == nil {
			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return blk, err

@ -298,6 +351,20 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	// critical section
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		if err != nil {
			return 0, err
		}

		if has {
			return s.getSize(cid)
		}

		return s.cold.GetSize(ctx, cid)
	}

	size, err := s.hot.GetSize(ctx, cid)

	switch err {
@ -312,6 +379,10 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {

		size, err = s.cold.GetSize(ctx, cid)
		if err == nil {
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return size, err
@ -336,6 +407,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {

	s.debug.LogWrite(blk)

	// critical section
	if s.txnMarkSet != nil {
		s.markLiveRefs([]cid.Cid{blk.Cid()})
		return nil
	}

	s.trackTxnRef(blk.Cid())
	return nil
}
@ -381,6 +458,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {

	s.debug.LogWriteMany(blks)

	// critical section
	if s.txnMarkSet != nil {
		s.markLiveRefs(batch)
		return nil
	}

	s.trackTxnRefMany(batch)
	return nil
}
@ -440,6 +523,23 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
		return cb(data)
	}

	// critical section
	s.txnLk.RLock() // the lock is released in protectView if we are not in critical section
	if s.txnMarkSet != nil {
		has, err := s.txnMarkSet.Has(cid)
		s.txnLk.RUnlock()

		if err != nil {
			return err
		}

		if has {
			return s.view(cid, cb)
		}

		return s.cold.View(ctx, cid, cb)
	}

	// views are (optimistically) protected two-fold:
	// - if there is an active transaction, then the reference is protected.
	// - if there is no active transaction, active views are tracked in a
@ -460,6 +560,10 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro

		err = s.cold.View(ctx, cid, cb)
		if err == nil {
			if bstore.IsHotView(ctx) {
				s.reifyColdObject(cid)
			}

			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return err
@ -569,6 +673,9 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
		}
	}

	// spawn the reifier
	go s.reifyOrchestrator()

	// watch the chain
	chain.SubscribeHeadChanges(s.HeadChange)

@ -589,12 +696,19 @@ func (s *SplitStore) Close() error {
	}

	if atomic.LoadInt32(&s.compacting) == 1 {
		s.txnSyncMx.Lock()
		s.txnSync = true
		s.txnSyncCond.Broadcast()
		s.txnSyncMx.Unlock()

		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
		for atomic.LoadInt32(&s.compacting) == 1 {
			time.Sleep(time.Second)
		}
	}

	s.reifyCond.Broadcast()
	s.reifyWorkers.Wait()
	s.cancel()
	return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
}

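The IsHotView checks added above gate background reification: a coldstore hit on a hot-view read queues the object for promotion back to the hotstore. A sketch of how a caller opts in, assuming the WithHotView context helper that pairs with IsHotView elsewhere in this PR (the helper name is an assumption here, not shown in this hunk):

```go
package demo

import (
	"context"

	"github.com/ipfs/go-cid"

	bstore "github.com/filecoin-project/lotus/blockstore"
)

// readHot fetches a block through the splitstore with the hot-view flag
// set on the context, so a coldstore hit also queues the object for
// reification back into the hotstore.
func readHot(ss bstore.Blockstore, c cid.Cid) error {
	ctx := bstore.WithHotView(context.Background()) // assumed helper
	_, err := ss.Get(ctx, c)
	return err
}
```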
@ -4,6 +4,7 @@ import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

@ -67,7 +68,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
	}
	defer output.Close() //nolint:errcheck

	var mx sync.Mutex
	write := func(format string, args ...interface{}) {
		mx.Lock()
		defer mx.Unlock()
		_, err := fmt.Fprintf(output, format+"\n", args...)
		if err != nil {
			log.Warnf("error writing check output: %s", err)
@ -82,9 +86,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
	write("compaction index: %d", s.compactionIndex)
	write("--")

	var coldCnt, missingCnt int64
	coldCnt := new(int64)
	missingCnt := new(int64)

	visitor, err := s.markSetEnv.CreateVisitor("check", 0)
	visitor, err := s.markSetEnv.New("check", 0)
	if err != nil {
		return xerrors.Errorf("error creating visitor: %w", err)
	}
@ -111,10 +116,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
			}

			if has {
				coldCnt++
				atomic.AddInt64(coldCnt, 1)
				write("cold object reference: %s", c)
			} else {
				missingCnt++
				atomic.AddInt64(missingCnt, 1)
				write("missing object reference: %s", c)
				return errStopWalk
			}
@ -128,9 +133,9 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
		return err
	}

	log.Infow("check done", "cold", coldCnt, "missing", missingCnt)
	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
	write("--")
	write("cold: %d missing: %d", coldCnt, missingCnt)
	write("cold: %d missing: %d", *coldCnt, *missingCnt)
	write("DONE")

	return nil

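The counter changes above exist because the check walk now runs its callbacks on multiple goroutines, so a plain int64 increment captured by the closure would be a data race. A minimal, self-contained illustration of the pattern adopted (heap-allocated counter plus atomic.AddInt64):

```go
package demo

import (
	"sync"
	"sync/atomic"
)

// countConcurrently shows the pattern used above: a counter shared
// across goroutines is allocated once and updated atomically.
func countConcurrently(n int) int64 {
	count := new(int64)
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(count, 1) // a plain *count++ would race
		}()
	}
	wg.Wait()
	return *count
}
```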
@ -3,8 +3,10 @@ package splitstore
import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"
	"time"

@ -47,6 +49,10 @@ var (
	// SyncGapTime is the time delay from a tipset's min timestamp before we decide
	// there is a sync gap
	SyncGapTime = time.Minute

	// SyncWaitTime is the time delay from a tipset's min timestamp before we decide
	// we have synced.
	SyncWaitTime = 30 * time.Second
)

var (
@ -56,8 +62,6 @@ var (

const (
	batchSize = 16384

	defaultColdPurgeSize = 7_000_000
)

func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@ -140,9 +144,9 @@ func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool {
// transactionally protect incoming tipsets
func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	if !s.txnActive {
		s.txnLk.RUnlock()
		return
	}

@ -151,12 +155,115 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
		cids = append(cids, ts.Cids()...)
	}

	if len(cids) == 0 {
		s.txnLk.RUnlock()
		return
	}

	// critical section
	if s.txnMarkSet != nil {
		curTs := apply[len(apply)-1]
		timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
		doSync := time.Since(timestamp) < SyncWaitTime
		go func() {
			if doSync {
				defer func() {
					s.txnSyncMx.Lock()
					defer s.txnSyncMx.Unlock()
					s.txnSync = true
					s.txnSyncCond.Broadcast()
				}()
			}
			defer s.txnLk.RUnlock()
			s.markLiveRefs(cids)

		}()
		return
	}

	s.trackTxnRefMany(cids)
	s.txnLk.RUnlock()
}

func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
	log.Debugf("marking %d live refs", len(cids))
	startMark := time.Now()

	count := new(int32)
	visitor := newConcurrentVisitor()
	walkObject := func(c cid.Cid) error {
		return s.walkObjectIncomplete(c, visitor,
			func(c cid.Cid) error {
				if isUnitaryObject(c) {
					return errStopWalk
				}

				visit, err := s.txnMarkSet.Visit(c)
				if err != nil {
					return xerrors.Errorf("error visiting object: %w", err)
				}

				if !visit {
					return errStopWalk
				}

				atomic.AddInt32(count, 1)
				return nil
			},
			func(missing cid.Cid) error {
				log.Warnf("missing object reference %s in %s", missing, c)
				return errStopWalk
			})
	}

	// optimize the common case of single put
	if len(cids) == 1 {
		if err := walkObject(cids[0]); err != nil {
			log.Errorf("error marking tipset refs: %s", err)
		}
		log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
		return
	}

	workch := make(chan cid.Cid, len(cids))
	for _, c := range cids {
		workch <- c
	}
	close(workch)

	worker := func() error {
		for c := range workch {
			if err := walkObject(c); err != nil {
				return err
			}
		}

		return nil
	}

	workers := runtime.NumCPU() / 2
	if workers < 2 {
		workers = 2
	}
	if workers > len(cids) {
		workers = len(cids)
	}

	g := new(errgroup.Group)
	for i := 0; i < workers; i++ {
		g.Go(worker)
	}

	if err := g.Wait(); err != nil {
		log.Errorf("error marking tipset refs: %s", err)
	}

	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
}

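markLiveRefs fans a batch of tipset CIDs out to a bounded worker pool. A distilled, self-contained sketch of the same shape (queue everything on a buffered channel, close it, and drain with roughly NumCPU/2 workers under an errgroup); the function name is hypothetical:

```go
package demo

import (
	"runtime"

	"github.com/ipfs/go-cid"
	"golang.org/x/sync/errgroup"
)

// fanOut runs work over cids with a bounded worker pool, failing fast
// on the first error, mirroring the structure of markLiveRefs above.
func fanOut(cids []cid.Cid, work func(cid.Cid) error) error {
	ch := make(chan cid.Cid, len(cids))
	for _, c := range cids {
		ch <- c
	}
	close(ch) // workers exit when the channel drains

	workers := runtime.NumCPU() / 2
	if workers < 2 {
		workers = 2
	}
	if workers > len(cids) {
		workers = len(cids)
	}

	g := new(errgroup.Group)
	for i := 0; i < workers; i++ {
		g.Go(func() error {
			for c := range ch {
				if err := work(c); err != nil {
					return err
				}
			}
			return nil
		})
	}
	return g.Wait()
}
```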
// transactionally protect a view
|
||||
func (s *SplitStore) protectView(c cid.Cid) {
|
||||
s.txnLk.RLock()
|
||||
// the txnLk is held for read
|
||||
defer s.txnLk.RUnlock()
|
||||
|
||||
if s.txnActive {
|
||||
@ -227,7 +334,7 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) {
|
||||
}
|
||||
|
||||
// protect all pending transactional references
|
||||
func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
|
||||
func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
|
||||
for {
|
||||
var txnRefs map[cid.Cid]struct{}
|
||||
|
||||
@ -299,14 +406,14 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error {
|
||||
|
||||
// transactionally protect a reference by walking the object and marking.
|
||||
// concurrent markings are short circuited by checking the markset.
|
||||
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSetVisitor) error {
|
||||
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: cold objects are deleted heaviest first, so the consituents of an object
|
||||
// cannot be deleted before the object itself.
|
||||
return s.walkObjectIncomplete(root, tmpVisitor(),
|
||||
return s.walkObjectIncomplete(root, newTmpVisitor(),
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
return errStopWalk
|
||||
@ -386,6 +493,12 @@ func (s *SplitStore) compact(curTs *types.TipSet) {
|
||||
}
|
||||
|
||||
func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
if s.checkpointExists() {
|
||||
// this really shouldn't happen, but if it somehow does, it means that the hotstore
|
||||
// might be potentially inconsistent; abort compaction and notify the user to intervene.
|
||||
return xerrors.Errorf("checkpoint exists; aborting compaction")
|
||||
}
|
||||
|
||||
currentEpoch := curTs.Height()
|
||||
boundaryEpoch := currentEpoch - CompactionBoundary
|
||||
|
||||
@ -397,7 +510,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
|
||||
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
|
||||
|
||||
markSet, err := s.markSetEnv.CreateVisitor("live", s.markSetSize)
|
||||
markSet, err := s.markSetEnv.New("live", s.markSetSize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating mark set: %w", err)
|
||||
}
|
||||
@ -408,9 +521,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// we are ready for concurrent marking
|
||||
s.beginTxnMarking(markSet)
|
||||
|
||||
// 0. track all protected references at beginning of compaction; anything added later should
|
||||
// be transactionally protected by the write
|
||||
log.Info("protecting references with registered protectors")
|
||||
@ -424,7 +534,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
log.Info("marking reachable objects")
|
||||
startMark := time.Now()
|
||||
|
||||
var count int64
|
||||
count := new(int64)
|
||||
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
@ -440,7 +550,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
count++
|
||||
atomic.AddInt64(count, 1)
|
||||
return nil
|
||||
})
|
||||
|
||||
@ -448,9 +558,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return xerrors.Errorf("error marking: %w", err)
|
||||
}
|
||||
|
||||
s.markSetSize = count + count>>2 // overestimate a bit
|
||||
s.markSetSize = *count + *count>>2 // overestimate a bit
|
||||
|
||||
log.Infow("marking done", "took", time.Since(startMark), "marked", count)
|
||||
log.Infow("marking done", "took", time.Since(startMark), "marked", *count)
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
@ -470,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
log.Info("collecting cold objects")
|
||||
startCollect := time.Now()
|
||||
|
||||
coldw, err := NewColdSetWriter(s.coldSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating coldset: %w", err)
|
||||
}
|
||||
defer coldw.Close() //nolint:errcheck
|
||||
|
||||
// some stats for logging
|
||||
var hotCnt, coldCnt int
|
||||
|
||||
cold := make([]cid.Cid, 0, s.coldPurgeSize)
|
||||
err = s.hot.ForEachKey(func(c cid.Cid) error {
|
||||
// was it marked?
|
||||
mark, err := markSet.Has(c)
|
||||
@ -487,7 +602,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
}
|
||||
|
||||
// it's cold, mark it as candidate for move
|
||||
cold = append(cold, c)
|
||||
if err := coldw.Write(c); err != nil {
|
||||
return xerrors.Errorf("error writing cid to coldstore: %w", err)
|
||||
}
|
||||
coldCnt++
|
||||
|
||||
return nil
|
||||
@ -497,12 +614,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return xerrors.Errorf("error collecting cold objects: %w", err)
|
||||
}
|
||||
|
||||
log.Infow("cold collection done", "took", time.Since(startCollect))
|
||||
|
||||
if coldCnt > 0 {
|
||||
s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit
|
||||
if err := coldw.Close(); err != nil {
|
||||
return xerrors.Errorf("error closing coldset: %w", err)
|
||||
}
|
||||
|
||||
log.Infow("cold collection done", "took", time.Since(startCollect))
|
||||
|
||||
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
|
||||
@ -520,11 +637,17 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return err
}

coldr, err := NewColdSetReader(s.coldSetPath())
if err != nil {
return xerrors.Errorf("error opening coldset: %w", err)
}
defer coldr.Close() //nolint:errcheck

// 3. copy the cold objects to the coldstore -- if we have one
if !s.cfg.DiscardColdBlocks {
log.Info("moving cold objects to the coldstore")
startMove := time.Now()
err = s.moveColdBlocks(cold)
err = s.moveColdBlocks(coldr)
if err != nil {
return xerrors.Errorf("error moving cold objects: %w", err)
}
@ -533,41 +656,64 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
if err := s.checkClosing(); err != nil {
return err
}

if err := coldr.Reset(); err != nil {
return xerrors.Errorf("error resetting coldset: %w", err)
}
}

// 4. sort cold objects so that the dags with most references are deleted first
// this ensures that we can't refer to a dag with its constituents already deleted, ie
// we leave no dangling references.
log.Info("sorting cold objects")
startSort := time.Now()
err = s.sortObjects(cold)
if err != nil {
return xerrors.Errorf("error sorting objects: %w", err)
}
log.Infow("sorting done", "took", time.Since(startSort))

// 4.1 protect transactional refs once more
// strictly speaking, this is not necessary as purge will do it before deleting each
// batch. however, there is likely a largish number of references accumulated during
// this sort and this protects before entering purge context.
err = s.protectTxnRefs(markSet)
if err != nil {
return xerrors.Errorf("error protecting transactional refs: %w", err)
// 4. Purge cold objects with checkpointing for recovery.
// This is the critical section of compaction, whereby any cold object not in the markSet is
// considered already deleted.
// We delete cold objects in batches, holding the transaction lock, where we check the markSet
// again for new references created by the VM.
// After each batch, we write a checkpoint to disk; if the process is interrupted before completion,
// the process will continue from the checkpoint in the next recovery.
if err := s.beginCriticalSection(markSet); err != nil {
return xerrors.Errorf("error beginning critical section: %w", err)
}

if err := s.checkClosing(); err != nil {
return err
}

// wait for the head to catch up so that the current tipset is marked
s.waitForSync()

if err := s.checkClosing(); err != nil {
return err
}

checkpoint, err := NewCheckpoint(s.checkpointPath())
if err != nil {
return xerrors.Errorf("error creating checkpoint: %w", err)
}
defer checkpoint.Close() //nolint:errcheck

// 5. purge cold objects from the hotstore, taking protected references into account
log.Info("purging cold objects from the hotstore")
startPurge := time.Now()
err = s.purge(cold, markSet)
err = s.purge(coldr, checkpoint, markSet)
if err != nil {
return xerrors.Errorf("error purging cold blocks: %w", err)
return xerrors.Errorf("error purging cold objects: %w", err)
}
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))

s.endCriticalSection()

if err := checkpoint.Close(); err != nil {
log.Warnf("error closing checkpoint: %s", err)
}
if err := os.Remove(s.checkpointPath()); err != nil {
log.Warnf("error removing checkpoint: %s", err)
}
if err := coldr.Close(); err != nil {
log.Warnf("error closing coldset: %s", err)
}
if err := os.Remove(s.coldSetPath()); err != nil {
log.Warnf("error removing coldset: %s", err)
}

// we are done; do some housekeeping
s.endTxnProtect()
s.gcHotstore()
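
The checkpointed purge described in the comment above replaces the old sort-then-purge flow. A sketch of the pattern, deleting in bounded batches and recording the last processed CID after each batch so an interrupted purge can resume; all names here are illustrative, the real `Checkpoint` type lives in this package:

```go
package sketch

import cid "github.com/ipfs/go-cid"

func purgeWithCheckpoint(cold []cid.Cid,
	isLive func(cid.Cid) (bool, error),
	deleteMany func([]cid.Cid) error,
	checkpoint func(cid.Cid) error) error {
	const batchSize = 16384

	for start := 0; start < len(cold); start += batchSize {
		end := start + batchSize
		if end > len(cold) {
			end = len(cold)
		}
		batch := cold[start:end]

		dead := make([]cid.Cid, 0, len(batch))
		for _, c := range batch {
			live, err := isLive(c) // the markset may have grown since collection
			if err != nil {
				return err
			}
			if !live {
				dead = append(dead, c)
			}
		}

		if len(dead) > 0 {
			if err := deleteMany(dead); err != nil {
				return err
			}
		}

		// checkpoint the last CID of the batch; recovery seeks past it
		if err := checkpoint(batch[len(batch)-1]); err != nil {
			return err
		}
	}
	return nil
}
```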
@ -598,12 +744,51 @@ func (s *SplitStore) beginTxnProtect() {
defer s.txnLk.Unlock()

s.txnActive = true
s.txnSync = false
s.txnRefs = make(map[cid.Cid]struct{})
s.txnMissing = make(map[cid.Cid]struct{})
}

func (s *SplitStore) beginTxnMarking(markSet MarkSetVisitor) {
markSet.SetConcurrent()
func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {
log.Info("beginning critical section")

// do that once first to get the bulk before the markset is in critical section
if err := s.protectTxnRefs(markSet); err != nil {
return xerrors.Errorf("error protecting transactional references: %w", err)
}

if err := markSet.BeginCriticalSection(); err != nil {
return xerrors.Errorf("error beginning critical section for markset: %w", err)
}

s.txnLk.Lock()
defer s.txnLk.Unlock()

s.txnMarkSet = markSet

// and do it again while holding the lock to mark references that might have been created
// in the meantime and avoid races of the type Has->txnRef->enterCS->Get fails because
// it's not in the markset
if err := s.protectTxnRefs(markSet); err != nil {
return xerrors.Errorf("error protecting transactional references: %w", err)
}

return nil
}

func (s *SplitStore) waitForSync() {
log.Info("waiting for sync")
startWait := time.Now()
defer func() {
log.Infow("waiting for sync done", "took", time.Since(startWait))
}()

s.txnSyncMx.Lock()
defer s.txnSyncMx.Unlock()

for !s.txnSync {
s.txnSyncCond.Wait()
}
}

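The waitForSync handshake above is a standard condition-variable gate. A reduced sketch of the same mechanism, assuming a mutex-backed `sync.Cond`; the chain-sync side flips the flag and broadcasts, while compaction blocks until it observes the flag:

```go
package sketch

import "sync"

type syncGate struct {
	mx   sync.Mutex
	cond *sync.Cond
	done bool
}

func newSyncGate() *syncGate {
	g := &syncGate{}
	g.cond = sync.NewCond(&g.mx)
	return g
}

// signal is called by the producer side (cf. the test helpers that set
// txnSync and broadcast before waiting for compaction).
func (g *syncGate) signal() {
	g.mx.Lock()
	g.done = true
	g.cond.Broadcast()
	g.mx.Unlock()
}

// wait blocks until signal has run; Wait atomically releases the mutex
// while blocked and reacquires it before returning.
func (g *syncGate) wait() {
	g.mx.Lock()
	defer g.mx.Unlock()
	for !g.done {
		g.cond.Wait()
	}
}
```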
func (s *SplitStore) endTxnProtect() {
@ -615,32 +800,51 @@ func (s *SplitStore) endTxnProtect() {
}

s.txnActive = false
s.txnSync = false
s.txnRefs = nil
s.txnMissing = nil
s.txnMarkSet = nil
}

func (s *SplitStore) endCriticalSection() {
log.Info("ending critical section")

s.txnLk.Lock()
defer s.txnLk.Unlock()

s.txnMarkSet.EndCriticalSection()
s.txnMarkSet = nil
}

func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
visitor ObjectVisitor, f func(cid.Cid) error) error {
var walked *cid.Set
toWalk := ts.Cids()
walkCnt := 0
scanCnt := 0
var walked ObjectVisitor
var mx sync.Mutex
// we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
toWalk := make([]cid.Cid, len(ts.Cids()))
copy(toWalk, ts.Cids())
walkCnt := new(int64)
scanCnt := new(int64)

stopWalk := func(_ cid.Cid) error { return errStopWalk }

walkBlock := func(c cid.Cid) error {
if !walked.Visit(c) {
visit, err := walked.Visit(c)
if err != nil {
return err
}
if !visit {
return nil
}

walkCnt++
atomic.AddInt64(walkCnt, 1)

if err := f(c); err != nil {
return err
}

var hdr types.BlockHeader
err := s.view(c, func(data []byte) error {
err = s.view(c, func(data []byte) error {
return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
})

@ -676,11 +880,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
}
scanCnt++
atomic.AddInt64(scanCnt, 1)
}

if hdr.Height > 0 {
mx.Lock()
toWalk = append(toWalk, hdr.Parents...)
mx.Unlock()
}

return nil
@ -692,20 +898,43 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
return err
}

workers := len(toWalk)
if workers > runtime.NumCPU()/2 {
workers = runtime.NumCPU() / 2
}
if workers < 2 {
workers = 2
}

// the walk is BFS, so we can reset the walked set in every iteration and avoid building up
// a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing
// over time)
walked = cid.NewSet()
walking := toWalk
toWalk = nil
for _, c := range walking {
if err := walkBlock(c); err != nil {
return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
}
walked = newConcurrentVisitor()
workch := make(chan cid.Cid, len(toWalk))
for _, c := range toWalk {
workch <- c
}
close(workch)
toWalk = toWalk[:0]

g := new(errgroup.Group)
for i := 0; i < workers; i++ {
g.Go(func() error {
for c := range workch {
if err := walkBlock(c); err != nil {
return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
}
}
return nil
})
}

if err := g.Wait(); err != nil {
return err
}
}

log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt)
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)

return nil
}
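
The chain walk is now parallelized per BFS layer: each epoch's block CIDs are pushed into a closed channel and drained by a bounded worker pool, with the next layer's parents collected under a mutex. A condensed sketch of that shape, assuming `errgroup` from golang.org/x/sync; the dedup/visitor logic is elided and the key type is a stand-in:

```go
package sketch

import (
	"runtime"
	"sync"

	"golang.org/x/sync/errgroup"
)

func walkLayers(roots []string, children func(string) ([]string, error)) error {
	var mx sync.Mutex
	toWalk := append([]string(nil), roots...)

	workers := runtime.NumCPU() / 2
	if workers < 2 {
		workers = 2
	}

	for len(toWalk) > 0 {
		workch := make(chan string, len(toWalk))
		for _, c := range toWalk {
			workch <- c
		}
		close(workch)
		next := toWalk[:0] // reuse the backing array; items were copied into workch

		g := new(errgroup.Group)
		for i := 0; i < workers; i++ {
			g.Go(func() error {
				for c := range workch {
					kids, err := children(c)
					if err != nil {
						return err
					}
					mx.Lock()
					next = append(next, kids...)
					mx.Unlock()
				}
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			return err
		}
		toWalk = next // advance to the next BFS layer
	}
	return nil
}
```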
@ -824,7 +1053,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m
return nil
}

// internal version used by walk
// internal version used during compaction and related operations
func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error {
if isIdentiyCid(c) {
data, err := decodeIdentityCid(c)
@ -859,10 +1088,34 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) {
return s.cold.Has(s.ctx, c)
}

func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
func (s *SplitStore) get(c cid.Cid) (blocks.Block, error) {
blk, err := s.hot.Get(s.ctx, c)
switch err {
case nil:
return blk, nil
case bstore.ErrNotFound:
return s.cold.Get(s.ctx, c)
default:
return nil, err
}
}

func (s *SplitStore) getSize(c cid.Cid) (int, error) {
sz, err := s.hot.GetSize(s.ctx, c)
switch err {
case nil:
return sz, nil
case bstore.ErrNotFound:
return s.cold.GetSize(s.ctx, c)
default:
return 0, err
}
}

func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
batch := make([]blocks.Block, 0, batchSize)

for _, c := range cold {
err := coldr.ForEach(func(c cid.Cid) error {
if err := s.checkClosing(); err != nil {
return err
}
@ -871,7 +1124,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
if err != nil {
if err == bstore.ErrNotFound {
log.Warnf("hotstore missing block %s", c)
continue
return nil
}

return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err)
@ -885,6 +1138,12 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
}
batch = batch[:0]
}

return nil
})

if err != nil {
return xerrors.Errorf("error iterating coldset: %w", err)
}

if len(batch) > 0 {
@ -897,177 +1156,202 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error {
return nil
}

// sorts a slice of objects heaviest first -- it's a little expensive but worth the
// guarantee that we don't leave dangling references behind, e.g. if we die in the middle
// of a purge.
func (s *SplitStore) sortObjects(cids []cid.Cid) error {
// we cache the keys to avoid making a gazillion of strings
keys := make(map[cid.Cid]string)
key := func(c cid.Cid) string {
s, ok := keys[c]
if !ok {
s = string(c.Hash())
keys[c] = s
}
return s
}

// compute sorting weights as the cumulative number of DAG links
weights := make(map[string]int)
for _, c := range cids {
// this can take quite a while, so check for shutdown with every opportunity
if err := s.checkClosing(); err != nil {
return err
}

w := s.getObjectWeight(c, weights, key)
weights[key(c)] = w
}

// sort!
sort.Slice(cids, func(i, j int) bool {
wi := weights[key(cids[i])]
wj := weights[key(cids[j])]
if wi == wj {
return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0
}

return wi > wj
})

return nil
}

func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int {
w, ok := weights[key(c)]
if ok {
return w
}

// we treat block headers specially to avoid walking the entire chain
var hdr types.BlockHeader
err := s.view(c, func(data []byte) error {
return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
})
if err == nil {
w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key)
weights[key(hdr.ParentStateRoot)] = w1

w2 := s.getObjectWeight(hdr.Messages, weights, key)
weights[key(hdr.Messages)] = w2

return 1 + w1 + w2
}

var links []cid.Cid
err = s.view(c, func(data []byte) error {
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c)
})
})
if err != nil {
return 1
}

w = 1
for _, c := range links {
// these are internal refs, so dags will be dags
if c.Prefix().Codec != cid.DagCBOR {
w++
continue
}

wc := s.getObjectWeight(c, weights, key)
weights[key(c)] = wc

w += wc
}

return w
}

func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error {
if len(cids) == 0 {
return nil
}

// we don't delete one giant batch of millions of objects, but rather do smaller batches
// so that we don't stop the world for an extended period of time
done := false
for i := 0; !done; i++ {
start := i * batchSize
end := start + batchSize
if end >= len(cids) {
end = len(cids)
done = true
}

err := deleteBatch(cids[start:end])
if err != nil {
return xerrors.Errorf("error deleting batch: %w", err)
}
}

return nil
}

func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet MarkSet) error {
batch := make([]cid.Cid, 0, batchSize)
deadCids := make([]cid.Cid, 0, batchSize)

var purgeCnt, liveCnt int
defer func() {
log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
}()

return s.purgeBatch(cids,
func(cids []cid.Cid) error {
deadCids := deadCids[:0]
deleteBatch := func() error {
pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)

for {
if err := s.checkClosing(); err != nil {
return err
}
purgeCnt += pc
liveCnt += lc
batch = batch[:0]

s.txnLk.Lock()
if len(s.txnRefs) == 0 {
// keep the lock!
break
}
return err
}

// unlock and protect
s.txnLk.Unlock()
err := coldr.ForEach(func(c cid.Cid) error {
batch = append(batch, c)
if len(batch) == batchSize {
return deleteBatch()
}

err := s.protectTxnRefs(markSet)
if err != nil {
return xerrors.Errorf("error protecting transactional refs: %w", err)
}
return nil
})

if err != nil {
return err
}

if len(batch) > 0 {
return deleteBatch()
}

return nil
}

func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoint, markSet MarkSet) (purgeCnt int, liveCnt int, err error) {
if err := s.checkClosing(); err != nil {
return 0, 0, err
}

s.txnLk.Lock()
defer s.txnLk.Unlock()

for _, c := range batch {
has, err := markSet.Has(c)
if err != nil {
return 0, 0, xerrors.Errorf("error checking markset for liveness: %w", err)
}

if has {
liveCnt++
continue
}

deadCids = append(deadCids, c)
}

if len(deadCids) == 0 {
if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
return 0, 0, xerrors.Errorf("error setting checkpoint: %w", err)
}

return 0, liveCnt, nil
}

if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil {
return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err)
}

s.debug.LogDelete(deadCids)
purgeCnt = len(deadCids)

if err := checkpoint.Set(batch[len(batch)-1]); err != nil {
return purgeCnt, liveCnt, xerrors.Errorf("error setting checkpoint: %w", err)
}

return purgeCnt, liveCnt, nil
}

func (s *SplitStore) coldSetPath() string {
return filepath.Join(s.path, "coldset")
}

func (s *SplitStore) checkpointPath() string {
return filepath.Join(s.path, "checkpoint")
}

func (s *SplitStore) checkpointExists() bool {
_, err := os.Stat(s.checkpointPath())
return err == nil
}

func (s *SplitStore) completeCompaction() error {
checkpoint, last, err := OpenCheckpoint(s.checkpointPath())
if err != nil {
return xerrors.Errorf("error opening checkpoint: %w", err)
}
defer checkpoint.Close() //nolint:errcheck

coldr, err := NewColdSetReader(s.coldSetPath())
if err != nil {
return xerrors.Errorf("error opening coldset: %w", err)
}
defer coldr.Close() //nolint:errcheck

markSet, err := s.markSetEnv.Recover("live")
if err != nil {
return xerrors.Errorf("error recovering markset: %w", err)
}
defer markSet.Close() //nolint:errcheck

// PURGE
log.Info("purging cold objects from the hotstore")
startPurge := time.Now()
err = s.completePurge(coldr, checkpoint, last, markSet)
if err != nil {
return xerrors.Errorf("error purging cold objects: %w", err)
}
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))

markSet.EndCriticalSection()

if err := checkpoint.Close(); err != nil {
log.Warnf("error closing checkpoint: %s", err)
}
if err := os.Remove(s.checkpointPath()); err != nil {
log.Warnf("error removing checkpoint: %s", err)
}
if err := coldr.Close(); err != nil {
log.Warnf("error closing coldset: %s", err)
}
if err := os.Remove(s.coldSetPath()); err != nil {
log.Warnf("error removing coldset: %s", err)
}

// Note: at this point we can start the splitstore; a compaction should run on
// the first head change, which will trigger gc on the hotstore.
// We don't mind the second (back-to-back) compaction as the head will
// have advanced during marking and coldset accumulation.
return nil
}

func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, start cid.Cid, markSet MarkSet) error {
if !start.Defined() {
return s.purge(coldr, checkpoint, markSet)
}

seeking := true
batch := make([]cid.Cid, 0, batchSize)
deadCids := make([]cid.Cid, 0, batchSize)

var purgeCnt, liveCnt int
defer func() {
log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt)
}()

deleteBatch := func() error {
pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet)

purgeCnt += pc
liveCnt += lc
batch = batch[:0]

return err
}

err := coldr.ForEach(func(c cid.Cid) error {
if seeking {
if start.Equals(c) {
seeking = false
}

defer s.txnLk.Unlock()

for _, c := range cids {
live, err := markSet.Has(c)
if err != nil {
return xerrors.Errorf("error checking for liveness: %w", err)
}

if live {
liveCnt++
continue
}

deadCids = append(deadCids, c)
}

err := s.hot.DeleteMany(s.ctx, deadCids)
if err != nil {
return xerrors.Errorf("error purging cold objects: %w", err)
}

s.debug.LogDelete(deadCids)

purgeCnt += len(deadCids)
return nil
})
}

batch = append(batch, c)
if len(batch) == batchSize {
return deleteBatch()
}

return nil
})

if err != nil {
return err
}

if len(batch) > 0 {
return deleteBatch()
}

return nil
}

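The recovery path in completePurge seeks past work already done before resuming. A reduced sketch of that seek, with names illustrative; the real code iterates the on-disk coldset rather than a slice:

```go
package sketch

import cid "github.com/ipfs/go-cid"

// resumeAfter skips coldset entries up to and including the checkpointed
// CID, then resumes normal processing from the next entry.
func resumeAfter(entries []cid.Cid, last cid.Cid, process func(cid.Cid) error) error {
	seeking := last.Defined()
	for _, c := range entries {
		if seeking {
			if last.Equals(c) {
				seeking = false // everything up to and including c was already purged
			}
			continue
		}
		if err := process(c); err != nil {
			return err
		}
	}
	return nil
}
```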
// I really don't like having this code, but we seem to have some occasional DAG references with
@ -1077,7 +1361,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error {
// have this gem[TM].
// My best guess is that they are parent message receipts or yet to be computed state roots; magik
// thinks the cause may be block validation.
func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
s.txnLk.Lock()
missing := s.txnMissing
s.txnMissing = nil
@ -1106,7 +1390,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) {
}

towalk := missing
visitor := tmpVisitor()
visitor := newTmpVisitor()
missing = make(map[cid.Cid]struct{})

for c := range towalk {
214
blockstore/splitstore/splitstore_reify.go
Normal file
@ -0,0 +1,214 @@
package splitstore

import (
"errors"
"runtime"
"sync/atomic"

"golang.org/x/xerrors"

blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
)

var (
errReifyLimit = errors.New("reification limit reached")
ReifyLimit = 16384
)

func (s *SplitStore) reifyColdObject(c cid.Cid) {
if !s.isWarm() {
return
}

if isUnitaryObject(c) {
return
}

s.reifyMx.Lock()
defer s.reifyMx.Unlock()

_, ok := s.reifyInProgress[c]
if ok {
return
}

s.reifyPend[c] = struct{}{}
s.reifyCond.Broadcast()
}

func (s *SplitStore) reifyOrchestrator() {
workers := runtime.NumCPU() / 4
if workers < 2 {
workers = 2
}

workch := make(chan cid.Cid, workers)
defer close(workch)

for i := 0; i < workers; i++ {
s.reifyWorkers.Add(1)
go s.reifyWorker(workch)
}

for {
s.reifyMx.Lock()
for len(s.reifyPend) == 0 && atomic.LoadInt32(&s.closing) == 0 {
s.reifyCond.Wait()
}

if atomic.LoadInt32(&s.closing) != 0 {
s.reifyMx.Unlock()
return
}

reifyPend := s.reifyPend
s.reifyPend = make(map[cid.Cid]struct{})
s.reifyMx.Unlock()

for c := range reifyPend {
select {
case workch <- c:
case <-s.ctx.Done():
return
}
}
}
}

func (s *SplitStore) reifyWorker(workch chan cid.Cid) {
defer s.reifyWorkers.Done()
for c := range workch {
s.doReify(c)
}
}

func (s *SplitStore) doReify(c cid.Cid) {
var toreify, totrack, toforget []cid.Cid

defer func() {
s.reifyMx.Lock()
defer s.reifyMx.Unlock()

for _, c := range toreify {
delete(s.reifyInProgress, c)
}
for _, c := range totrack {
delete(s.reifyInProgress, c)
}
for _, c := range toforget {
delete(s.reifyInProgress, c)
}
}()

s.txnLk.RLock()
defer s.txnLk.RUnlock()

count := 0
err := s.walkObjectIncomplete(c, newTmpVisitor(),
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
}

count++
if count > ReifyLimit {
return errReifyLimit
}

s.reifyMx.Lock()
_, inProgress := s.reifyInProgress[c]
if !inProgress {
s.reifyInProgress[c] = struct{}{}
}
s.reifyMx.Unlock()

if inProgress {
return errStopWalk
}

has, err := s.hot.Has(s.ctx, c)
if err != nil {
return xerrors.Errorf("error checking hotstore: %w", err)
}

if has {
if s.txnMarkSet != nil {
hasMark, err := s.txnMarkSet.Has(c)
if err != nil {
log.Warnf("error checking markset: %s", err)
} else if hasMark {
toforget = append(toforget, c)
return errStopWalk
}
} else {
totrack = append(totrack, c)
return errStopWalk
}
}

toreify = append(toreify, c)
return nil
},
func(missing cid.Cid) error {
log.Warnf("missing reference while reifying %s: %s", c, missing)
return errStopWalk
})

if err != nil {
if xerrors.Is(err, errReifyLimit) {
log.Debug("reification aborted; reify limit reached")
return
}

log.Warnf("error walking cold object for reification (cid: %s): %s", c, err)
return
}

log.Debugf("reifying %d objects rooted at %s", len(toreify), c)

// this should not get too big, maybe some 100s of objects.
batch := make([]blocks.Block, 0, len(toreify))
for _, c := range toreify {
blk, err := s.cold.Get(s.ctx, c)
if err != nil {
log.Warnf("error retrieving cold object for reification (cid: %s): %s", c, err)
continue
}

if err := s.checkClosing(); err != nil {
return
}

batch = append(batch, blk)
}

if len(batch) > 0 {
err = s.hot.PutMany(s.ctx, batch)
if err != nil {
log.Warnf("error reifying cold object (cid: %s): %s", c, err)
return
}
}

if s.txnMarkSet != nil {
if len(toreify) > 0 {
if err := s.txnMarkSet.MarkMany(toreify); err != nil {
log.Warnf("error marking reified objects: %s", err)
}
}
if len(totrack) > 0 {
if err := s.txnMarkSet.MarkMany(totrack); err != nil {
log.Warnf("error marking tracked objects: %s", err)
}
}
} else {
// if txnActive is false these are noops
if len(toreify) > 0 {
s.trackTxnRefMany(toreify)
}
if len(totrack) > 0 {
s.trackTxnRefMany(totrack)
}
}
}
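
The new file is built around a pending-set orchestrator: producers register CIDs and broadcast on a condition variable, and the orchestrator drains the set in rounds. A reduced sketch of that shape, with a stand-in key type; the real code additionally fans work out to a channel-fed worker pool and checks a closing flag:

```go
package sketch

import "sync"

type orchestrator struct {
	mx   sync.Mutex
	cond *sync.Cond
	pend map[string]struct{}
	done bool
}

func newOrchestrator() *orchestrator {
	o := &orchestrator{pend: make(map[string]struct{})}
	o.cond = sync.NewCond(&o.mx)
	return o
}

// enqueue registers a key and wakes the orchestrator, mirroring
// reifyColdObject adding to reifyPend and broadcasting.
func (o *orchestrator) enqueue(k string) {
	o.mx.Lock()
	o.pend[k] = struct{}{}
	o.cond.Broadcast()
	o.mx.Unlock()
}

// run drains the pending set in rounds until shut down.
func (o *orchestrator) run(work func(string)) {
	for {
		o.mx.Lock()
		for len(o.pend) == 0 && !o.done {
			o.cond.Wait()
		}
		if o.done {
			o.mx.Unlock()
			return
		}
		pend := o.pend
		o.pend = make(map[string]struct{})
		o.mx.Unlock()

		for k := range pend {
			work(k)
		}
	}
}
```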
@ -4,6 +4,9 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"sync"
"sync/atomic"
"testing"
@ -20,12 +23,14 @@ import (
datastore "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"
)

func init() {
CompactionThreshold = 5
CompactionBoundary = 2
WarmupBoundary = 0
SyncWaitTime = time.Millisecond
logging.SetLogLevel("splitstore", "DEBUG")
}

@ -80,8 +85,17 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Fatal(err)
}

path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
_ = os.RemoveAll(path)
})

// open the splitstore
ss, err := Open("", ds, hot, cold, cfg)
ss, err := Open(path, ds, hot, cold, cfg)
if err != nil {
t.Fatal(err)
}
@ -125,6 +139,10 @@ func testSplitStore(t *testing.T, cfg *Config) {
}

waitForCompaction := func() {
ss.txnSyncMx.Lock()
ss.txnSync = true
ss.txnSyncCond.Broadcast()
ss.txnSyncMx.Unlock()
for atomic.LoadInt32(&ss.compacting) == 1 {
time.Sleep(100 * time.Millisecond)
}
@ -259,8 +277,17 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
t.Fatal(err)
}

path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
_ = os.RemoveAll(path)
})

// open the splitstore
ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"})
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil {
t.Fatal(err)
}
@ -305,6 +332,10 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
}

waitForCompaction := func() {
ss.txnSyncMx.Lock()
ss.txnSync = true
ss.txnSyncCond.Broadcast()
ss.txnSyncMx.Unlock()
for atomic.LoadInt32(&ss.compacting) == 1 {
time.Sleep(100 * time.Millisecond)
}
@ -357,6 +388,235 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
}
}

func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
ds := dssync.MutexWrap(datastore.NewMapDatastore())
hot := newMockStore()
cold := newMockStore()

mkRandomBlock := func() blocks.Block {
data := make([]byte, 128)
_, err := rand.Read(data)
if err != nil {
t.Fatal(err)
}

return blocks.NewBlock(data)
}

block1 := mkRandomBlock()
block2 := mkRandomBlock()
block3 := mkRandomBlock()

hdr := mock.MkBlock(nil, 0, 0)
hdr.Messages = block1.Cid()
hdr.ParentMessageReceipts = block2.Cid()
hdr.ParentStateRoot = block3.Cid()
block4, err := hdr.ToStorageBlock()
if err != nil {
t.Fatal(err)
}

allBlocks := []blocks.Block{block1, block2, block3, block4}
for _, blk := range allBlocks {
err := cold.Put(context.Background(), blk)
if err != nil {
t.Fatal(err)
}
}

path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
_ = os.RemoveAll(path)
})

ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil {
t.Fatal(err)
}
defer ss.Close() //nolint

ss.warmupEpoch = 1
go ss.reifyOrchestrator()

waitForReification := func() {
for {
ss.reifyMx.Lock()
ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
ss.reifyMx.Unlock()

if ready {
return
}

time.Sleep(time.Millisecond)
}
}

// first access using the standard view
err = f(context.Background(), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}

// nothing should be reified
waitForReification()
for _, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}

if has {
t.Fatal("block unexpectedly reified")
}
}

// now make the hot/reifying view and ensure access reifies
err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}

// everything should be reified
waitForReification()
for i, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}

if !has {
t.Fatalf("block%d was not reified", i+1)
}
}
}

func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) {
ds := dssync.MutexWrap(datastore.NewMapDatastore())
hot := newMockStore()
cold := newMockStore()

mkRandomBlock := func() blocks.Block {
data := make([]byte, 128)
_, err := rand.Read(data)
if err != nil {
t.Fatal(err)
}

return blocks.NewBlock(data)
}

block1 := mkRandomBlock()
block2 := mkRandomBlock()
block3 := mkRandomBlock()

hdr := mock.MkBlock(nil, 0, 0)
hdr.Messages = block1.Cid()
hdr.ParentMessageReceipts = block2.Cid()
hdr.ParentStateRoot = block3.Cid()
block4, err := hdr.ToStorageBlock()
if err != nil {
t.Fatal(err)
}

allBlocks := []blocks.Block{block1, block2, block3, block4}
for _, blk := range allBlocks {
err := cold.Put(context.Background(), blk)
if err != nil {
t.Fatal(err)
}
}

path, err := ioutil.TempDir("", "splitstore.*")
if err != nil {
t.Fatal(err)
}

t.Cleanup(func() {
_ = os.RemoveAll(path)
})

ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
if err != nil {
t.Fatal(err)
}
defer ss.Close() //nolint

ss.warmupEpoch = 1
go ss.reifyOrchestrator()

waitForReification := func() {
for {
ss.reifyMx.Lock()
ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0
ss.reifyMx.Unlock()

if ready {
return
}

time.Sleep(time.Millisecond)
}
}

// do a hot access -- nothing should be reified as the limit should be exceeded
oldReifyLimit := ReifyLimit
ReifyLimit = 2
t.Cleanup(func() {
ReifyLimit = oldReifyLimit
})

err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid())
if err != nil {
t.Fatal(err)
}

waitForReification()

for _, blk := range allBlocks {
has, err := hot.Has(context.Background(), blk.Cid())
if err != nil {
t.Fatal(err)
}

if has {
t.Fatal("block unexpectedly reified")
}
}

}

func TestSplitStoreReification(t *testing.T) {
t.Log("test reification with Has")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Has(ctx, c)
return err
})
t.Log("test reification with Get")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Get(ctx, c)
return err
})
t.Log("test reification with GetSize")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.GetSize(ctx, c)
return err
})
t.Log("test reification with View")
testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
return s.View(ctx, c, func(_ []byte) error { return nil })
})
t.Log("test reification limit")
testSplitStoreReificationLimit(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error {
_, err := s.Has(ctx, c)
return err
})
}

type mockChain struct {
t testing.TB

@ -426,17 +686,25 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app

type mockStore struct {
mx sync.Mutex
set map[cid.Cid]blocks.Block
set map[string]blocks.Block
}

func newMockStore() *mockStore {
return &mockStore{set: make(map[cid.Cid]blocks.Block)}
return &mockStore{set: make(map[string]blocks.Block)}
}

func (b *mockStore) keyOf(c cid.Cid) string {
return string(c.Hash())
}

func (b *mockStore) cidOf(k string) cid.Cid {
return cid.NewCidV1(cid.Raw, mh.Multihash([]byte(k)))
}

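The mock store above is re-keyed by multihash string rather than by whole `cid.Cid`. The likely motivation: two CIDs can wrap the same multihash under different codecs, and as map keys they compare unequal even though they address the same block. A tiny illustration of the identity the splitstore cares about:

```go
package sketch

import cid "github.com/ipfs/go-cid"

// sameObject reports whether two CIDs address the same block, ignoring the
// codec; this is what keying the mock by string(c.Hash()) achieves.
func sameObject(a, b cid.Cid) bool {
	return string(a.Hash()) == string(b.Hash())
}
```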
func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) {
b.mx.Lock()
defer b.mx.Unlock()
_, ok := b.set[cid]
_, ok := b.set[b.keyOf(cid)]
return ok, nil
}

@ -446,7 +714,7 @@ func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) {
b.mx.Lock()
defer b.mx.Unlock()

blk, ok := b.set[cid]
blk, ok := b.set[b.keyOf(cid)]
if !ok {
return nil, blockstore.ErrNotFound
}
@ -474,7 +742,7 @@ func (b *mockStore) Put(_ context.Context, blk blocks.Block) error {
b.mx.Lock()
defer b.mx.Unlock()

b.set[blk.Cid()] = blk
b.set[b.keyOf(blk.Cid())] = blk
return nil
}

@ -483,7 +751,7 @@ func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error {
defer b.mx.Unlock()

for _, blk := range blks {
b.set[blk.Cid()] = blk
b.set[b.keyOf(blk.Cid())] = blk
}
return nil
}
@ -492,7 +760,7 @@ func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error {
b.mx.Lock()
defer b.mx.Unlock()

delete(b.set, cid)
delete(b.set, b.keyOf(cid))
return nil
}

@ -501,7 +769,7 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
defer b.mx.Unlock()

for _, c := range cids {
delete(b.set, c)
delete(b.set, b.keyOf(c))
}
return nil
}
@ -515,7 +783,7 @@ func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
defer b.mx.Unlock()

for c := range b.set {
err := f(c)
err := f(b.cidOf(c))
if err != nil {
return err
}
@ -1,6 +1,7 @@
package splitstore

import (
"sync"
"sync/atomic"
"time"

@ -55,12 +56,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
if WarmupBoundary < epoch {
boundaryEpoch = epoch - WarmupBoundary
}
var mx sync.Mutex
batchHot := make([]blocks.Block, 0, batchSize)
count := int64(0)
xcount := int64(0)
missing := int64(0)
count := new(int64)
xcount := new(int64)
missing := new(int64)

visitor, err := s.markSetEnv.CreateVisitor("warmup", 0)
visitor, err := s.markSetEnv.New("warmup", 0)
if err != nil {
return xerrors.Errorf("error creating visitor: %w", err)
}
@ -73,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
return errStopWalk
}

count++
atomic.AddInt64(count, 1)

has, err := s.hot.Has(s.ctx, c)
if err != nil {
@ -87,22 +89,25 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
blk, err := s.cold.Get(s.ctx, c)
if err != nil {
if err == bstore.ErrNotFound {
missing++
atomic.AddInt64(missing, 1)
return errStopWalk
}
return err
}

xcount++
atomic.AddInt64(xcount, 1)

mx.Lock()
batchHot = append(batchHot, blk)
if len(batchHot) == batchSize {
err = s.hot.PutMany(s.ctx, batchHot)
if err != nil {
mx.Unlock()
return err
}
batchHot = batchHot[:0]
}
mx.Unlock()

return nil
})
@ -118,9 +123,9 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
}
}

log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)
log.Infow("warmup stats", "visited", *count, "warm", *xcount, "missing", *missing)

s.markSetSize = count + count>>2 // overestimate a bit
s.markSetSize = *count + *count>>2 // overestimate a bit
err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize))
if err != nil {
log.Warnf("error saving mark set size: %s", err)
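
The warmup counters switch from plain ints to `*int64` bumped with `sync/atomic` because the walk now runs on multiple goroutines; a plain `count++` on a shared variable would race. A self-contained sketch of the idiom:

```go
package sketch

import (
	"sync"
	"sync/atomic"
)

func countConcurrently(items []int, work func(int)) int64 {
	count := new(int64)
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		go func(it int) {
			defer wg.Done()
			atomic.AddInt64(count, 1) // safe across goroutines
			work(it)
		}(it)
	}
	wg.Wait()
	return *count // dereference only after all writers are done
}
```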
@ -1,6 +1,8 @@
package splitstore

import (
"sync"

cid "github.com/ipfs/go-cid"
)

@ -17,16 +19,42 @@ func (v *noopVisitor) Visit(_ cid.Cid) (bool, error) {
return true, nil
}

type cidSetVisitor struct {
type tmpVisitor struct {
set *cid.Set
}

var _ ObjectVisitor = (*cidSetVisitor)(nil)
var _ ObjectVisitor = (*tmpVisitor)(nil)

func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) {
if isUnitaryObject(c) {
return false, nil
}

func (v *cidSetVisitor) Visit(c cid.Cid) (bool, error) {
return v.set.Visit(c), nil
}

func tmpVisitor() ObjectVisitor {
return &cidSetVisitor{set: cid.NewSet()}
func newTmpVisitor() ObjectVisitor {
return &tmpVisitor{set: cid.NewSet()}
}

type concurrentVisitor struct {
mx sync.Mutex
set *cid.Set
}

var _ ObjectVisitor = (*concurrentVisitor)(nil)

func newConcurrentVisitor() *concurrentVisitor {
return &concurrentVisitor{set: cid.NewSet()}
}

func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) {
if isUnitaryObject(c) {
return false, nil
}

v.mx.Lock()
defer v.mx.Unlock()

return v.set.Visit(c), nil
}
@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBzv5sf4eTyo8cjJGfGnpxo6QkEPkRShG9GqjE2A5QaW5
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBo9TSD4XXRFtu6snv6QNYvXgRaSaVb116YiYEsDWgKtq
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt
Binary file not shown.
Binary file not shown.
@ -17,7 +17,7 @@ import (
const BootstrappersFile = ""
const GenesisFile = ""

const GenesisNetworkVersion = network.Version14
const GenesisNetworkVersion = network.Version15

var UpgradeBreezeHeight = abi.ChainEpoch(-1)

@ -47,7 +47,7 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)

var UpgradeChocolateHeight = abi.ChainEpoch(-17)

var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@ -90,6 +90,7 @@ func init() {
UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)

BuildType |= Build2k

@ -16,7 +16,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}

const GenesisNetworkVersion = network.Version13
const GenesisNetworkVersion = network.Version14

const BootstrappersFile = "butterflynet.pi"
const GenesisFile = "butterflynet.car"
@ -40,13 +40,16 @@ const UpgradeTrustHeight = -13
const UpgradeNorwegianHeight = -14
const UpgradeTurboHeight = -15
const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = 6360
const UpgradeSnapDealsHeight = 99999999
const UpgradeChocolateHeight = -17

const UpgradeOhSnapHeight = 240

func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)

SetAddressNetwork(address.Testnet)
@ -54,7 +54,8 @@ const UpgradeHyperdriveHeight = 420

const UpgradeChocolateHeight = 312746

const UpgradeSnapDealsHeight = 99999999
// 2022-02-10T19:23:00Z
const UpgradeOhSnapHeight = 682006

func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
@ -47,7 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15)

var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@ -67,7 +67,8 @@ const UpgradeHyperdriveHeight = 892800
// 2021-10-26T13:30:00Z
const UpgradeChocolateHeight = 1231620

var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999)
// 2022-03-01T15:00:00Z
var UpgradeOhSnapHeight = abi.ChainEpoch(1594680)

func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
@ -75,7 +76,7 @@ func init() {
}

if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" {
UpgradeSnapDealsHeight = math.MaxInt64
UpgradeOhSnapHeight = math.MaxInt64
}

Devnet = false
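
The timestamp in the comment checks out against the epoch arithmetic: mainnet genesis is 2020-08-24T22:00:00Z and epochs are 30 seconds, so epoch 1594680 lands exactly on 2022-03-01T15:00:00Z. A runnable cross-check:

```go
package main

import (
	"fmt"
	"time"
)

// epoch 1594680 * 30s after mainnet genesis = the advertised upgrade time
func main() {
	genesis := time.Date(2020, 8, 24, 22, 0, 0, 0, time.UTC)
	upgrade := genesis.Add(1594680 * 30 * time.Second)
	fmt.Println(upgrade) // 2022-03-01 15:00:00 +0000 UTC
}
```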
@ -99,7 +99,7 @@ var (
UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15
UpgradeChocolateHeight abi.ChainEpoch = -16
UpgradeSnapDealsHeight abi.ChainEpoch = -17
UpgradeOhSnapHeight abi.ChainEpoch = -17

DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@ -107,8 +107,8 @@ var (

GenesisNetworkVersion = network.Version0

NewestNetworkVersion = network.Version14
ActorUpgradeNetworkVersion = network.Version4
NewestNetworkVersion = network.Version15
ActorUpgradeNetworkVersion = network.Version15

Devnet = true
ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
@ -1,4 +1,54 @@
{
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": {
"cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q",
"digest": "c3ad7bb549470b82ad52ed070aebb4f4",
"sector_size": 536870912
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": {
"cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv",
"digest": "994c5b7d450ca9da348c910689f2dc7f",
"sector_size": 536870912
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": {
"cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S",
"digest": "5aedd2cf3e5c0a15623d56a1b43110ad",
"sector_size": 8388608
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": {
"cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i",
"digest": "abd80269054d391a734febdac0d2e687",
"sector_size": 8388608
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": {
"cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9",
"digest": "311f92a3e75036ced01b1c0025f1fa0c",
"sector_size": 2048
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": {
"cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P",
"digest": "eadad9784969890d30f2749708c79771",
"sector_size": 2048
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": {
"cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS",
"digest": "1b3cfd761a961543f9eb273e435a06a2",
"sector_size": 34359738368
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": {
"cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN",
"digest": "3a6941983754737fde880d29c7094905",
"sector_size": 34359738368
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": {
"cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp",
"digest": "1a392e7b759fb18e036c7559b5ece816",
"sector_size": 68719476736
},
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": {
"cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg",
"digest": "80e366df2f1011953c2d01c7b7c9ee8e",
"sector_size": 68719476736
},
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
"cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
"digest": "7610b9f82bfc88405b7a832b651ce2f6",
@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.13.3-dev"
const BuildVersion = "1.15.1-dev"

func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@ -61,6 +61,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof7.SectorInfo
type ExtendedSectorInfo = proof7.ExtendedSectorInfo
type PoStProof = proof7.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

@ -45,6 +45,7 @@ const (
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof{{.latestVersion}}.SectorInfo
type ExtendedSectorInfo = proof{{.latestVersion}}.ExtendedSectorInfo
type PoStProof = proof{{.latestVersion}}.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
{{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}}
@ -193,6 +194,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate

// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -201,6 +203,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams

func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old

@ -23,6 +23,7 @@ import (
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"

builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"

@ -282,6 +283,7 @@ type SectorPreCommitOnChainInfo struct {
type PoStPartition = miner0.PoStPartition
type RecoveryDeclaration = miner0.RecoveryDeclaration
type FaultDeclaration = miner0.FaultDeclaration
type ReplicaUpdate = miner7.ReplicaUpdate

// Params
type DeclareFaultsParams = miner0.DeclareFaultsParams
@ -290,6 +292,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams

func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old
@ -39,7 +39,11 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount)
|
||||
|
||||
func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{
|
||||
{{if (ge .v 7)}}
|
||||
Sv: toV{{.v}}SignedVoucher(*sv),
|
||||
{{else}}
|
||||
Sv: *sv,
|
||||
{{end}}
|
||||
Secret: secret,
|
||||
})
|
||||
if aerr != nil {
|
||||
|
@@ -39,7 +39,9 @@ func (m message0) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message0) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych0.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message2) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message2) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych2.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message4) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message6) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message6) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych6.UpdateChannelStateParams{
        Sv: *sv,
        Sv: *sv,
        Secret: secret,
    })
    if aerr != nil {

@@ -39,7 +39,9 @@ func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*ty

func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
    params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{
        Sv: *sv,
        Sv: toV7SignedVoucher(*sv),
        Secret: secret,
    })
    if aerr != nil {

@@ -112,3 +112,21 @@ func (ls *laneState{{.v}}) Redeemed() (big.Int, error) {
func (ls *laneState{{.v}}) Nonce() (uint64, error) {
    return ls.LaneState.Nonce, nil
}

{{if (ge .v 7)}}
func toV{{.v}}SignedVoucher(sv SignedVoucher) paych{{.v}}.SignedVoucher {
    return paych{{.v}}.SignedVoucher{
        ChannelAddr:     sv.ChannelAddr,
        TimeLockMin:     sv.TimeLockMin,
        TimeLockMax:     sv.TimeLockMax,
        SecretHash:      sv.SecretPreimage,
        Extra:           sv.Extra,
        Lane:            sv.Lane,
        Nonce:           sv.Nonce,
        Amount:          sv.Amount,
        MinSettleHeight: sv.MinSettleHeight,
        Merges:          sv.Merges,
        Signature:       sv.Signature,
    }
}
{{end}}

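Note: the generated conversion above exists because specs-actors v7 renamed the voucher's SecretPreimage field to SecretHash, so the lotus-level SignedVoucher can no longer be passed through unchanged. A minimal sketch of just the renamed-field mapping, with hypothetical stand-in types (LotusVoucher/V7Voucher are not the real structs):

package main

import "fmt"

// LotusVoucher / V7Voucher are simplified stand-ins for the real structs;
// only the renamed field is shown, every other field copies across 1:1.
type LotusVoucher struct{ SecretPreimage []byte }
type V7Voucher struct{ SecretHash []byte }

func toV7(sv LotusVoucher) V7Voucher {
    return V7Voucher{SecretHash: sv.SecretPreimage} // field renamed in v7
}

func main() {
    fmt.Printf("%x\n", toV7(LotusVoucher{SecretPreimage: []byte{0xde, 0xad}}).SecretHash) // dead
}
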
@@ -112,3 +112,19 @@ func (ls *laneState7) Redeemed() (big.Int, error) {
func (ls *laneState7) Nonce() (uint64, error) {
    return ls.LaneState.Nonce, nil
}

func toV7SignedVoucher(sv SignedVoucher) paych7.SignedVoucher {
    return paych7.SignedVoucher{
        ChannelAddr:     sv.ChannelAddr,
        TimeLockMin:     sv.TimeLockMin,
        TimeLockMax:     sv.TimeLockMax,
        SecretHash:      sv.SecretPreimage,
        Extra:           sv.Extra,
        Lane:            sv.Lane,
        Nonce:           sv.Nonce,
        Amount:          sv.Amount,
        MinSettleHeight: sv.MinSettleHeight,
        Merges:          sv.Merges,
        Signature:       sv.Signature,
    }
}

@@ -142,7 +142,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re

    go func() {
        start := build.Clock.Now()
        log.Infow("start fetching randomness", "round", round)
        log.Debugw("start fetching randomness", "round", round)
        resp, err := db.client.Get(ctx, round)

        var br beacon.Response
@@ -152,7 +152,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
            br.Entry.Round = resp.Round()
            br.Entry.Data = resp.Signature()
        }
        log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start))
        log.Debugw("done fetching randomness", "round", round, "took", build.Clock.Since(start))
        out <- br
        close(out)
    }()

@@ -32,6 +32,7 @@ import (

    /* inline-gen end */

    "github.com/filecoin-project/lotus/blockstore"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
@@ -92,16 +93,17 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
        partDone()
    }()

    makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
    ctx = blockstore.WithHotView(ctx)
    makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) {
        vmopt := &vm.VMOpts{
            StateBase: base,
            Epoch: epoch,
            Epoch: e,
            Rand: r,
            Bstore: sm.ChainStore().StateBlockstore(),
            Actors: NewActorRegistry(),
            Syscalls: sm.Syscalls,
            CircSupplyCalc: sm.GetVMCirculatingSupply,
            NetworkVersion: sm.GetNetworkVersion(ctx, epoch),
            NetworkVersion: sm.GetNetworkVersion(ctx, e),
            BaseFee: baseFee,
            LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts),
        }
@@ -109,12 +111,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
        return sm.VMConstructor()(ctx, vmopt)
    }

    vmi, err := makeVmWithBaseState(pstate)
    if err != nil {
        return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
    }

    runCron := func(epoch abi.ChainEpoch) error {
    runCron := func(vmCron *vm.VM, epoch abi.ChainEpoch) error {
        cronMsg := &types.Message{
            To: cron.Address,
            From: builtin.SystemActorAddr,
@@ -126,59 +123,58 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
            Method: cron.Methods.EpochTick,
            Params: nil,
        }
        ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
        ret, err := vmCron.ApplyImplicitMessage(ctx, cronMsg)
        if err != nil {
            return err
            return xerrors.Errorf("running cron: %w", err)
        }

        if em != nil {
            if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
                return xerrors.Errorf("callback failed on cron message: %w", err)
            }
        }
        if ret.ExitCode != 0 {
            return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
            return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode)
        }

        return nil
    }

    for i := parentEpoch; i < epoch; i++ {
        var err error
        if i > parentEpoch {
            // run cron for null rounds if any
            if err := runCron(i); err != nil {
                return cid.Undef, cid.Undef, err
            vmCron, err := makeVmWithBaseStateAndEpoch(pstate, i)
            if err != nil {
                return cid.Undef, cid.Undef, xerrors.Errorf("making cron vm: %w", err)
            }

            pstate, err = vmi.Flush(ctx)
            // run cron for null rounds if any
            if err = runCron(vmCron, i); err != nil {
                return cid.Undef, cid.Undef, xerrors.Errorf("running cron: %w", err)
            }

            pstate, err = vmCron.Flush(ctx)
            if err != nil {
                return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
                return cid.Undef, cid.Undef, xerrors.Errorf("flushing cron vm: %w", err)
            }
        }

        // handle state forks
        // XXX: The state tree
        newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts)
        pstate, err = sm.HandleStateForks(ctx, pstate, i, em, ts)
        if err != nil {
            return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
        }

        if pstate != newState {
            vmi, err = makeVmWithBaseState(newState)
            if err != nil {
                return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
            }
        }

        if err = vmi.SetBlockHeight(ctx, i+1); err != nil {
            return cid.Undef, cid.Undef, xerrors.Errorf("error advancing vm an epoch: %w", err)
        }

        pstate = newState
    }

    partDone()
    partDone = metrics.Timer(ctx, metrics.VMApplyMessages)

    vmi, err := makeVmWithBaseStateAndEpoch(pstate, epoch)
    if err != nil {
        return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
    }

    var receipts []cbg.CBORMarshaler
    processedMsgs := make(map[cid.Cid]struct{})
    for _, b := range bms {
@@ -246,7 +242,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
    partDone()
    partDone = metrics.Timer(ctx, metrics.VMApplyCron)

    if err := runCron(epoch); err != nil {
    if err := runCron(vmi, epoch); err != nil {
        return cid.Cid{}, cid.Cid{}, err
    }

@@ -26,7 +26,7 @@ import (
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/go-state-types/network"
    blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
    proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
    "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"

    bstore "github.com/filecoin-project/lotus/blockstore"
    "github.com/filecoin-project/lotus/build"
@@ -182,7 +182,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
        }
    }

    return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
    return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot)
    }

    if precp != h.ParentMessageReceipts {
@@ -400,12 +400,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
        return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
    }

    sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
    xsectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
    if err != nil {
        return xerrors.Errorf("getting winning post sector set: %w", err)
    }

    ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
    sectors := make([]proof.SectorInfo, len(xsectors))
    for i, xsi := range xsectors {
        sectors[i] = proof.SectorInfo{
            SealProof: xsi.SealProof,
            SectorNumber: xsi.SectorNumber,
            SealedCID: xsi.SealedCID,
        }
    }

    ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{
        Randomness: rand,
        Proofs: h.WinPoStProof,
        ChallengedSectors: sectors,
@@ -449,7 +458,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl

    stateroot, _, err := filec.sm.TipSetState(ctx, baseTs)
    if err != nil {
        return err
        return xerrors.Errorf("failed to compute tipset state for %s: %w", baseTs.Key(), err)
    }

    st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot)
@@ -466,7 +475,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
        // Phase 1: syntactic validation, as defined in the spec
        minGas := pl.OnChainMessage(msg.ChainLength())
        if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
            return err
            return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err)
        }

        // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
@@ -482,7 +491,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
        if filec.sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version13 {
            sender, err = st.LookupID(m.From)
            if err != nil {
                return err
                return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err)
            }
        } else {
            sender = m.From
@@ -565,12 +574,13 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl

    bmroot, err := bmArr.Root()
    if err != nil {
        return err
        return xerrors.Errorf("failed to root bls msgs: %w", err)

    }

    smroot, err := smArr.Root()
    if err != nil {
        return err
        return xerrors.Errorf("failed to root secp msgs: %w", err)
    }

    mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
@@ -578,7 +588,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
        SecpkMessages: smroot,
    })
    if err != nil {
        return err
        return xerrors.Errorf("failed to put msg meta: %w", err)
    }

    if b.Header.Messages != mrcid {
@@ -586,7 +596,12 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
    }

    // Finally, flush.
    return vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
    err = vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
    if err != nil {
        return xerrors.Errorf("failed to flush: %w", err)
    }

    return nil
}

func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {

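Note: a recurring pattern in the hunks above is replacing bare `return err` with `xerrors.Errorf("context: %w", err)`. The %w verb keeps the original error in the chain, so callers can still match it with errors.Is while logs gain context. A minimal, runnable illustration (errBase, step, and the CID string are hypothetical):

package main

import (
    "errors"
    "fmt"

    "golang.org/x/xerrors"
)

var errBase = errors.New("boom")

func step() error { return errBase }

func main() {
    // wrap with context, as the diff does for tipset-state computation
    err := xerrors.Errorf("failed to compute tipset state for bafy...: %w", step())
    fmt.Println(err)                     // message now carries the call-site context
    fmt.Println(errors.Is(err, errBase)) // true: %w preserves the error chain
}
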
@@ -5,6 +5,8 @@ import (
    "runtime"
    "time"

    "github.com/docker/go-units"

    "github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
    "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"

@@ -158,18 +160,13 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
    }},
    Expensive: true,
}, {
    Height: build.UpgradeSnapDealsHeight,
    Height: build.UpgradeOhSnapHeight,
    Network: network.Version15,
    Migration: UpgradeActorsV7,
    PreMigrations: []stmgr.PreMigration{{
        PreMigration: PreUpgradeActorsV7,
        StartWithin: 120,
        StartWithin: 180,
        DontStartWithin: 60,
        StopWithin: 35,
    }, {
        PreMigration: PreUpgradeActorsV7,
        StartWithin: 30,
        DontStartWithin: 15,
        StopWithin: 5,
    }},
    Expensive: true,
@@ -1245,8 +1242,15 @@ func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr
        workerCount /= 2
    }

    config := nv15.Config{MaxWorkers: uint(workerCount)}
    _, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
    lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
    if err != nil {
        return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
    }

    config := nv15.Config{MaxWorkers: uint(workerCount),
        ProgressLogPeriod: time.Minute * 5}

    _, err = upgradeActorsV7Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
    return err
}

@@ -1255,9 +1259,10 @@ func upgradeActorsV7Common(
    root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
    config nv15.Config,
) (cid.Cid, error) {
    buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
    store := store.ActorStore(ctx, buf)

    writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
    // TODO: pretty sure we'd achieve nothing by doing this, confirm in review
    //buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore)
    store := store.ActorStore(ctx, writeStore)
    // Load the state root.
    var stateRoot types.StateRoot
    if err := store.Get(ctx, root, &stateRoot); err != nil {
@@ -1287,15 +1292,13 @@ func upgradeActorsV7Common(
        return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
    }

    // Persist the new tree.
    // Persists the new tree and shuts down the flush worker
    if err := writeStore.Flush(ctx); err != nil {
        return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
    }

    {
        from := buf
        to := buf.Read()

        if err := vm.Copy(ctx, from, to, newRoot); err != nil {
            return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
        }
    if err := writeStore.Shutdown(ctx); err != nil {
        return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
    }

    return newRoot, nil

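Note: the StartWithin bump from 120 to 180 epochs above widens the first pre-migration window. At Filecoin's 30-second epochs that is 90 minutes of lead time before the upgrade epoch, with the second pass starting 15 minutes out. A quick sanity check of that arithmetic:

package main

import (
    "fmt"
    "time"
)

const epochDuration = 30 * time.Second // Filecoin block time

func main() {
    windows := []struct {
        name        string
        startWithin int64 // epochs before the upgrade epoch, from the diff
    }{
        {"first pre-migration", 180},
        {"second pre-migration", 30},
    }
    for _, w := range windows {
        fmt.Printf("%s may start ~%s before the upgrade\n",
            w.name, time.Duration(w.startWithin)*epochDuration) // 1h30m0s, 15m0s
    }
}
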
@@ -461,7 +461,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,

    if et != nil {
        // TODO: maybe think about passing in more real parameters to this?
        wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil)
        wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
        if err != nil {
            return nil, err
        }
@@ -620,7 +620,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr

type WinningPoStProver interface {
    GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
    ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
    ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error)
}

type wppProvider struct{}
@@ -629,7 +629,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
    return []uint64{0}, nil
}

func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) {
    return ValidWpostForTesting, nil
}

@@ -692,11 +692,11 @@ func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (b
    panic("not supported")
}

func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
    panic("not supported")
}

func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) {
    panic("not supported")
}

chain/messagepool/check_test.go (new file, 224 lines)
@@ -0,0 +1,224 @@
//stm: #unit
package messagepool

import (
    "context"
    "fmt"
    "testing"

    "github.com/ipfs/go-datastore"
    logging "github.com/ipfs/go-log/v2"
    "github.com/stretchr/testify/assert"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/consensus/filcns"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/mock"
    "github.com/filecoin-project/lotus/chain/wallet"
    _ "github.com/filecoin-project/lotus/lib/sigs/bls"
    _ "github.com/filecoin-project/lotus/lib/sigs/secp"
)

func init() {
    _ = logging.SetLogLevel("*", "INFO")
}

func getCheckMessageStatus(statusCode api.CheckStatusCode, msgStatuses []api.MessageCheckStatus) (*api.MessageCheckStatus, error) {
    for i := 0; i < len(msgStatuses); i++ {
        iMsgStatuses := msgStatuses[i]
        if iMsgStatuses.CheckStatus.Code == statusCode {
            return &iMsgStatuses, nil
        }
    }
    return nil, fmt.Errorf("Could not find CheckStatusCode %s", statusCode)
}

func TestCheckMessages(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_CHECK_MESSAGES_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    if err != nil {
        t.Fatal(err)
    }

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    if err != nil {
        t.Fatal(err)
    }

    sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
    if err != nil {
        t.Fatal(err)
    }

    tma.setBalance(sender, 1000e15)
    target := mock.Address(1001)

    var protos []*api.MessagePrototype
    for i := 0; i < 5; i++ {
        msg := &types.Message{
            To: target,
            From: sender,
            Value: types.NewInt(1),
            Nonce: uint64(i),
            GasLimit: 50000000,
            GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
            GasPremium: types.NewInt(1),
            Params: make([]byte, 2<<10),
        }
        proto := &api.MessagePrototype{
            Message: *msg,
            ValidNonce: true,
        }
        protos = append(protos, proto)
    }

    messageStatuses, err := mp.CheckMessages(context.TODO(), protos)
    assert.NoError(t, err)
    for i := 0; i < len(messageStatuses); i++ {
        iMsgStatuses := messageStatuses[i]
        for j := 0; j < len(iMsgStatuses); j++ {
            jStatus := iMsgStatuses[j]
            assert.True(t, jStatus.OK)
        }
    }
}

func TestCheckPendingMessages(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_CHECK_PENDING_MESSAGES_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    if err != nil {
        t.Fatal(err)
    }

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    if err != nil {
        t.Fatal(err)
    }

    sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
    if err != nil {
        t.Fatal(err)
    }

    tma.setBalance(sender, 1000e15)
    target := mock.Address(1001)

    // add a valid message to the pool
    msg := &types.Message{
        To: target,
        From: sender,
        Value: types.NewInt(1),
        Nonce: 0,
        GasLimit: 50000000,
        GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
        GasPremium: types.NewInt(1),
        Params: make([]byte, 2<<10),
    }

    sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
    if err != nil {
        panic(err)
    }
    sm := &types.SignedMessage{
        Message: *msg,
        Signature: *sig,
    }
    mustAdd(t, mp, sm)

    messageStatuses, err := mp.CheckPendingMessages(context.TODO(), sender)
    assert.NoError(t, err)
    for i := 0; i < len(messageStatuses); i++ {
        iMsgStatuses := messageStatuses[i]
        for j := 0; j < len(iMsgStatuses); j++ {
            jStatus := iMsgStatuses[j]
            assert.True(t, jStatus.OK)
        }
    }
}

func TestCheckReplaceMessages(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_CHECK_REPLACE_MESSAGES_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    if err != nil {
        t.Fatal(err)
    }

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    if err != nil {
        t.Fatal(err)
    }

    sender, err := w.WalletNew(context.Background(), types.KTSecp256k1)
    if err != nil {
        t.Fatal(err)
    }

    tma.setBalance(sender, 1000e15)
    target := mock.Address(1001)

    // add a valid message to the pool
    msg := &types.Message{
        To: target,
        From: sender,
        Value: types.NewInt(1),
        Nonce: 0,
        GasLimit: 50000000,
        GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
        GasPremium: types.NewInt(1),
        Params: make([]byte, 2<<10),
    }

    sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{})
    if err != nil {
        panic(err)
    }
    sm := &types.SignedMessage{
        Message: *msg,
        Signature: *sig,
    }
    mustAdd(t, mp, sm)

    // create a new message with the same data, except that it is too big
    var msgs []*types.Message
    invalidmsg := &types.Message{
        To: target,
        From: sender,
        Value: types.NewInt(1),
        Nonce: 0,
        GasLimit: 50000000,
        GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
        GasPremium: types.NewInt(1),
        Params: make([]byte, 128<<10),
    }
    msgs = append(msgs, invalidmsg)

    {
        messageStatuses, err := mp.CheckReplaceMessages(context.TODO(), msgs)
        if err != nil {
            t.Fatal(err)
        }
        for i := 0; i < len(messageStatuses); i++ {
            iMsgStatuses := messageStatuses[i]

            status, err := getCheckMessageStatus(api.CheckStatusMessageSize, iMsgStatuses)
            if err != nil {
                t.Fatal(err)
            }
            // the replacement message should cause a status error
            assert.False(t, status.OK)
        }
    }

}

@@ -173,10 +173,17 @@ type MessagePool struct {

    sigValCache *lru.TwoQueueCache

    nonceCache *lru.Cache

    evtTypes [3]journal.EventType
    journal journal.Journal
}

type nonceCacheKey struct {
    tsk types.TipSetKey
    addr address.Address
}

type msgSet struct {
    msgs map[uint64]*types.SignedMessage
    nextNonce uint64
@@ -361,6 +368,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
    cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
    verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
    noncecache, _ := lru.New(256)

    cfg, err := loadConfig(ctx, ds)
    if err != nil {
@@ -386,6 +394,7 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra
    pruneCooldown: make(chan struct{}, 1),
    blsSigCache: cache,
    sigValCache: verifcache,
    nonceCache: noncecache,
    changes: lps.New(50),
    localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
    api: api,
@@ -1016,11 +1025,23 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
    done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
    defer done()

    nk := nonceCacheKey{
        tsk: ts.Key(),
        addr: addr,
    }

    n, ok := mp.nonceCache.Get(nk)
    if ok {
        return n.(uint64), nil
    }

    act, err := mp.api.GetActorAfter(addr, ts)
    if err != nil {
        return 0, err
    }

    mp.nonceCache.Add(nk, act.Nonce)

    return act.Nonce, nil
}

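Note: nonceCache above memoizes getStateNonce per (tipset, address) pair, since an actor's nonce is only stable relative to one tipset's state; the real cache is a 256-entry LRU. A simplified, runnable sketch of the same idea, using a map plus mutex instead of an LRU (key/nonceCache here are stand-ins, not the lotus types):

package main

import (
    "fmt"
    "sync"
)

// key mirrors nonceCacheKey in the diff: a (tipset, address) pair.
type key struct{ tipset, addr string }

type nonceCache struct {
    mu sync.Mutex
    m  map[key]uint64
}

func (c *nonceCache) get(k key, load func() uint64) uint64 {
    c.mu.Lock()
    defer c.mu.Unlock()
    if n, ok := c.m[k]; ok {
        return n // hit: skip the expensive state lookup
    }
    n := load()
    c.m[k] = n
    return n
}

func main() {
    c := &nonceCache{m: map[key]uint64{}}
    loads := 0
    load := func() uint64 { loads++; return 42 } // stands in for GetActorAfter
    k := key{"ts1", "f1abc"}
    c.get(k, load)
    c.get(k, load)
    fmt.Println(loads) // 1: the second call is served from the cache
}
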
@@ -1,3 +1,4 @@
//stm: #unit
package messagepool

import (
@@ -8,6 +9,7 @@ import (

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/ipfs/go-cid"
    "github.com/ipfs/go-datastore"
    logging "github.com/ipfs/go-log/v2"
@@ -206,6 +208,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS

func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
    t.Helper()
    //stm: @CHAIN_MEMPOOL_GET_NONCE_001
    n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
    if err != nil {
        t.Fatal(err)
@@ -224,6 +227,8 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
}

func TestMessagePool(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_GET_NONCE_001

    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
@@ -325,6 +330,7 @@ func TestCheckMessageBig(t *testing.T) {
        Message: *msg,
        Signature: *sig,
    }
    //stm: @CHAIN_MEMPOOL_PUSH_001
    err = mp.Add(context.TODO(), sm)
    assert.ErrorIs(t, err, ErrMessageTooBig)
}
@@ -366,8 +372,10 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
    tma.applyBlock(t, a)
    tsa := mock.TipSet(a)

    //stm: @CHAIN_MEMPOOL_PENDING_001
    _, _ = mp.Pending(context.TODO())

    //stm: @CHAIN_MEMPOOL_SELECT_001
    selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
    if len(selm) == 0 {
        t.Fatal("should have returned the rest of the messages")
@@ -428,6 +436,7 @@ func TestRevertMessages(t *testing.T) {

    assertNonce(t, mp, sender, 4)

    //stm: @CHAIN_MEMPOOL_PENDING_001
    p, _ := mp.Pending(context.TODO())
    fmt.Printf("%+v\n", p)
    if len(p) != 3 {
@@ -486,6 +495,7 @@ func TestPruningSimple(t *testing.T) {

    mp.Prune()

    //stm: @CHAIN_MEMPOOL_PENDING_001
    msgs, _ := mp.Pending(context.TODO())
    if len(msgs) != 5 {
        t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
@@ -528,6 +538,7 @@ func TestLoadLocal(t *testing.T) {
    msgs := make(map[cid.Cid]struct{})
    for i := 0; i < 10; i++ {
        m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
        //stm: @CHAIN_MEMPOOL_PUSH_001
        cid, err := mp.Push(context.TODO(), m)
        if err != nil {
            t.Fatal(err)
@@ -544,6 +555,7 @@ func TestLoadLocal(t *testing.T) {
        t.Fatal(err)
    }

    //stm: @CHAIN_MEMPOOL_PENDING_001
    pmsgs, _ := mp.Pending(context.TODO())
    if len(msgs) != len(pmsgs) {
        t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
@@ -599,6 +611,7 @@ func TestClearAll(t *testing.T) {
    gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
    for i := 0; i < 10; i++ {
        m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
        //stm: @CHAIN_MEMPOOL_PUSH_001
        _, err := mp.Push(context.TODO(), m)
        if err != nil {
            t.Fatal(err)
@@ -610,8 +623,10 @@ func TestClearAll(t *testing.T) {
        mustAdd(t, mp, m)
    }

    //stm: @CHAIN_MEMPOOL_CLEAR_001
    mp.Clear(context.Background(), true)

    //stm: @CHAIN_MEMPOOL_PENDING_001
    pending, _ := mp.Pending(context.TODO())
    if len(pending) > 0 {
        t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
@@ -654,6 +669,7 @@ func TestClearNonLocal(t *testing.T) {
    gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
    for i := 0; i < 10; i++ {
        m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
        //stm: @CHAIN_MEMPOOL_PUSH_001
        _, err := mp.Push(context.TODO(), m)
        if err != nil {
            t.Fatal(err)
@@ -665,8 +681,10 @@ func TestClearNonLocal(t *testing.T) {
        mustAdd(t, mp, m)
    }

    //stm: @CHAIN_MEMPOOL_CLEAR_001
    mp.Clear(context.Background(), false)

    //stm: @CHAIN_MEMPOOL_PENDING_001
    pending, _ := mp.Pending(context.TODO())
    if len(pending) != 10 {
        t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
@@ -724,6 +742,7 @@ func TestUpdates(t *testing.T) {

    for i := 0; i < 10; i++ {
        m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
        //stm: @CHAIN_MEMPOOL_PUSH_001
        _, err := mp.Push(context.TODO(), m)
        if err != nil {
            t.Fatal(err)
@@ -745,3 +764,302 @@ func TestUpdates(t *testing.T) {
        t.Fatal("expected closed channel, but got an update instead")
    }
}

func TestMessageBelowMinGasFee(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    // fee is just below minimum gas fee
    fee := minimumBaseFee.Uint64() - 1
    {
        msg := &types.Message{
            To: to,
            From: from,
            Value: types.NewInt(1),
            Nonce: 0,
            GasLimit: 50000000,
            GasFeeCap: types.NewInt(fee),
            GasPremium: types.NewInt(1),
            Params: make([]byte, 32<<10),
        }

        sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
        if err != nil {
            panic(err)
        }
        sm := &types.SignedMessage{
            Message: *msg,
            Signature: *sig,
        }
        err = mp.Add(context.TODO(), sm)
        assert.ErrorIs(t, err, ErrGasFeeCapTooLow)
    }
}

func TestMessageValueTooHigh(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    totalFil := types.TotalFilecoinInt
    extra := types.NewInt(1)

    value := types.BigAdd(totalFil, extra)
    {
        msg := &types.Message{
            To: to,
            From: from,
            Value: value,
            Nonce: 0,
            GasLimit: 50000000,
            GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
            GasPremium: types.NewInt(1),
            Params: make([]byte, 32<<10),
        }

        sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
        if err != nil {
            panic(err)
        }
        sm := &types.SignedMessage{
            Message: *msg,
            Signature: *sig,
        }

        err = mp.Add(context.TODO(), sm)
        assert.Error(t, err)
    }
}

func TestMessageSignatureInvalid(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        msg := &types.Message{
            To: to,
            From: from,
            Value: types.NewInt(1),
            Nonce: 0,
            GasLimit: 50000000,
            GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
            GasPremium: types.NewInt(1),
            Params: make([]byte, 32<<10),
        }

        badSig := &crypto.Signature{
            Type: crypto.SigTypeSecp256k1,
            Data: make([]byte, 0),
        }
        sm := &types.SignedMessage{
            Message: *msg,
            Signature: *badSig,
        }
        err = mp.Add(context.TODO(), sm)
        // assert.Contains(t, err.Error(), "invalid signature length")
        assert.Error(t, err)
    }
}

func TestAddMessageTwice(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        // create a valid message
        sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
        mustAdd(t, mp, sm)

        // try to add it twice
        err = mp.Add(context.TODO(), sm)
        // assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
        assert.Error(t, err)
    }
}

func TestAddMessageTwiceNonceGap(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        // create message with invalid nonce (1)
        sm := makeTestMessage(w, from, to, 1, 50_000_000, minimumBaseFee.Uint64())
        mustAdd(t, mp, sm)

        // then try to add message again
        err = mp.Add(context.TODO(), sm)
        // assert.Contains(t, err.Error(), "unfulfilled nonce gap")
        assert.Error(t, err)
    }
}

func TestAddMessageTwiceCidDiff(t *testing.T) {
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
        mustAdd(t, mp, sm)

        // Create message with different data, so CID is different
        sm2 := makeTestMessage(w, from, to, 0, 50_000_001, minimumBaseFee.Uint64())

        //stm: @CHAIN_MEMPOOL_PUSH_001
        // then try to add message again
        err = mp.Add(context.TODO(), sm2)
        // assert.Contains(t, err.Error(), "replace by fee has too low GasPremium")
        assert.Error(t, err)
    }
}

func TestAddMessageTwiceCidDiffReplaced(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
        mustAdd(t, mp, sm)

        // Create message with different data, so CID is different
        sm2 := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()*2)
        mustAdd(t, mp, sm2)
    }
}

func TestRemoveMessage(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_PUSH_001
    tma := newTestMpoolAPI()

    w, err := wallet.NewWallet(wallet.NewMemKeyStore())
    assert.NoError(t, err)

    from, err := w.WalletNew(context.Background(), types.KTBLS)
    assert.NoError(t, err)

    tma.setBalance(from, 1000e9)

    ds := datastore.NewMapDatastore()

    mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
    assert.NoError(t, err)

    to := mock.Address(1001)

    {
        sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
        mustAdd(t, mp, sm)

        //stm: @CHAIN_MEMPOOL_REMOVE_001
        // remove message for sender
        mp.Remove(context.TODO(), from, sm.Message.Nonce, true)

        //stm: @CHAIN_MEMPOOL_PENDING_FOR_001
        // check messages in pool: should be none present
        msgs := mp.pendingFor(context.TODO(), from)
        assert.Len(t, msgs, 0)
    }
}

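Note: TestAddMessageTwiceCidDiff and TestAddMessageTwiceCidDiffReplaced above pin down replace-by-fee behaviour: an equal-GasPremium replacement with a different CID is rejected, while a doubled premium is accepted. A sketch of such a check, assuming a 25% minimum premium bump (the exact ratio is an assumption here, not something these tests establish):

package main

import "fmt"

// rbfAllowed sketches a replace-by-fee gate: the new message must bump the
// old premium by at least the assumed 25% ratio to displace it.
func rbfAllowed(oldPremium, newPremium uint64) bool {
    minBump := oldPremium + oldPremium*25/100
    return newPremium >= minBump
}

func main() {
    fmt.Println(rbfAllowed(100, 100)) // false: same premium, rejected
    fmt.Println(rbfAllowed(100, 200)) // true: doubled premium, accepted
}
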
@@ -1,3 +1,4 @@
//stm: #unit
package messagepool

import (
@@ -16,6 +17,7 @@ import (
)

func TestRepubMessages(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001
    oldRepublishBatchDelay := RepublishBatchDelay
    RepublishBatchDelay = time.Microsecond
    defer func() {
@@ -57,6 +59,7 @@ func TestRepubMessages(t *testing.T) {

    for i := 0; i < 10; i++ {
        m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
        //stm: @CHAIN_MEMPOOL_PUSH_001
        _, err := mp.Push(context.TODO(), m)
        if err != nil {
            t.Fatal(err)

@@ -1,3 +1,4 @@
//stm: #unit
package messagepool

import (
@@ -74,6 +75,8 @@ func makeTestMpool() (*MessagePool, *testMpoolAPI) {
}

func TestMessageChains(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001
    //stm: @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001
    mp, tma := makeTestMpool()

    // the actors
@@ -310,6 +313,8 @@ func TestMessageChains(t *testing.T) {
}

func TestMessageChainSkipping(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001

    // regression test for chain skip bug

    mp, tma := makeTestMpool()
@@ -382,6 +387,7 @@ func TestMessageChainSkipping(t *testing.T) {
}

func TestBasicMessageSelection(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    oldMaxNonceGap := MaxNonceGap
    MaxNonceGap = 1000
    defer func() {
@@ -532,6 +538,7 @@ func TestBasicMessageSelection(t *testing.T) {
}

func TestMessageSelectionTrimmingGas(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -595,6 +602,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -641,6 +649,7 @@ func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -707,6 +716,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
}

func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -788,6 +798,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
}

func TestPriorityMessageSelection(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -867,6 +878,7 @@ func TestPriorityMessageSelection(t *testing.T) {
}

func TestPriorityMessageSelection2(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -934,6 +946,7 @@ func TestPriorityMessageSelection2(t *testing.T) {
}

func TestPriorityMessageSelection3(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
    mp, tma := makeTestMpool()

    // the actors
@@ -1028,6 +1041,8 @@ func TestPriorityMessageSelection3(t *testing.T) {
}

func TestOptimalMessageSelection1(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

    // this test uses just a single actor sending messages with a low tq
    // the chain dependent merging algorithm should pick messages from the actor
    // from the start
@@ -1094,6 +1109,8 @@ func TestOptimalMessageSelection1(t *testing.T) {
}

func TestOptimalMessageSelection2(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

    // this test uses two actors sending messages to each other, with the first
    // actor paying (much) higher gas premium than the second.
    // We select with a low ticket quality; the chain dependent merging algorithm should pick
@@ -1173,6 +1190,8 @@ func TestOptimalMessageSelection2(t *testing.T) {
}

func TestOptimalMessageSelection3(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

    // this test uses 10 actors sending a block of messages to each other, with the first
    // actors paying higher gas premium than the subsequent actors.
    // We select with a low ticket quality; the chain dependent merging algorithm should pick
@@ -1416,6 +1435,8 @@ func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 {
}

func TestCompetitiveMessageSelectionExp(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

    if testing.Short() {
        t.Skip("skipping in short mode")
    }
@@ -1439,6 +1460,8 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) {
}

func TestCompetitiveMessageSelectionZipf(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001

    if testing.Short() {
        t.Skip("skipping in short mode")
    }
@@ -1462,6 +1485,7 @@ func TestCompetitiveMessageSelectionZipf(t *testing.T) {
}

func TestGasReward(t *testing.T) {
    //stm: @CHAIN_MEMPOOL_GET_GAS_REWARD_001
    tests := []struct {
        Premium uint64
        FeeCap uint64
@@ -1494,6 +1518,8 @@ func TestGasReward(t *testing.T) {
}

func TestRealWorldSelection(t *testing.T) {
    //stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001

    // load test-messages.json.gz and rewrite the messages so that
    // 1) we map each real actor to a test actor so that we can sign the messages
    // 2) adjust the nonces so that they start from 0

@@ -1,3 +1,4 @@
//stm: #unit
package messagesigner

import (
@@ -60,6 +61,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
    to2, err := w.WalletNew(ctx, types.KTSecp256k1)
    require.NoError(t, err)

    //stm: @CHAIN_MESSAGE_SIGNER_NEW_SIGNER_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_005
    type msgSpec struct {
        msg *types.Message
        mpoolNonce [1]uint64

@@ -1,3 +1,4 @@
//stm: #unit
package rand_test

import (
@@ -55,11 +56,13 @@ func TestNullRandomnessV1(t *testing.T) {

    randEpoch := ts.TipSet.TipSet().Height() - 2

    //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V1_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_02
    rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
    if err != nil {
        t.Fatal(err)
    }

    //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
    bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset)

    select {
@@ -68,6 +71,7 @@ func TestNullRandomnessV1(t *testing.T) {
        t.Fatal(resp.Err)
    }

    //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
    rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
    if err != nil {
        t.Fatal(err)
@@ -131,11 +135,13 @@ func TestNullRandomnessV2(t *testing.T) {

    randEpoch := ts.TipSet.TipSet().Height() - 2

    //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V2_01
    rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
    if err != nil {
        t.Fatal(err)
    }

    //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
    bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset)

    select {
@@ -144,6 +150,7 @@ func TestNullRandomnessV2(t *testing.T) {
        t.Fatal(resp.Err)
    }

    //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
    // note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
    rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
    if err != nil {
@@ -212,11 +219,13 @@ func TestNullRandomnessV3(t *testing.T) {

    randEpoch := ts.TipSet.TipSet().Height() - 2

    //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V3_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01
    rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key())
    if err != nil {
        t.Fatal(err)
    }

    //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01
    bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset)

    select {
@@ -225,6 +234,7 @@ func TestNullRandomnessV3(t *testing.T) {
        t.Fatal(resp.Err)
    }

    //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
    rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
    if err != nil {
        t.Fatal(err)

@@ -117,7 +117,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres
    return mas.GetSector(sid)
}

func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) {
func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) {
    act, err := sm.LoadActorRaw(ctx, maddr, st)
    if err != nil {
        return nil, xerrors.Errorf("failed to load miner actor: %w", err)
@@ -203,12 +203,13 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
        return nil, xerrors.Errorf("loading proving sectors: %w", err)
    }

    out := make([]builtin.SectorInfo, len(sectors))
    out := make([]builtin.ExtendedSectorInfo, len(sectors))
    for i, sinfo := range sectors {
        out[i] = builtin.SectorInfo{
        out[i] = builtin.ExtendedSectorInfo{
            SealProof: sinfo.SealProof,
            SectorNumber: sinfo.SectorNumber,
            SealedCID: sinfo.SealedCID,
            SectorKey: sinfo.SectorKeyCID,
        }
    }

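Note: ExtendedSectorInfo adds SectorKey, which (as far as the diff shows) carries the sector's original sealed CID for snap-upgraded sectors so provers can use it post-OhSnap, while the consensus-side hunk earlier in this commit strips it back down to a plain SectorInfo before verification. A sketch of that narrowing with hypothetical stand-in types:

package main

import "fmt"

// Hypothetical stand-ins for builtin.ExtendedSectorInfo / proof.SectorInfo.
type extendedSectorInfo struct {
    SealProof    int64
    SectorNumber uint64
    SealedCID    string
    SectorKey    *string // nil for sectors that were never snap-upgraded
}

type sectorInfo struct {
    SealProof    int64
    SectorNumber uint64
    SealedCID    string
}

// toProofSectors drops SectorKey, mirroring the verify-side conversion loop.
func toProofSectors(xs []extendedSectorInfo) []sectorInfo {
    out := make([]sectorInfo, len(xs))
    for i, x := range xs {
        out[i] = sectorInfo{SealProof: x.SealProof, SectorNumber: x.SectorNumber, SealedCID: x.SealedCID}
    }
    return out
}

func main() {
    key := "bafyOldSealedCID"
    fmt.Println(toProofSectors([]extendedSectorInfo{{1, 7, "bafyNew", &key}})) // [{1 7 bafyNew}]
}
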
@@ -8,6 +8,8 @@ import (
    "sync"
    "time"

    "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"

    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

@@ -15,8 +17,6 @@ import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/network"
    "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"

    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
    init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
@@ -211,7 +211,7 @@ func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
    return ok
}

func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) {
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv15.MemMigrationCache, ts *types.TipSet) {
    height := ts.Height()
    parent := ts.ParentState()

@@ -4,6 +4,8 @@ import (
    "context"
    "sync"

    "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"

    "github.com/filecoin-project/lotus/chain/rand"

    "github.com/filecoin-project/lotus/chain/beacon"
@@ -18,10 +20,6 @@ import (
    "github.com/filecoin-project/go-state-types/crypto"
    "github.com/filecoin-project/go-state-types/network"

    // Used for genesis.
    msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
    "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
@@ -30,6 +28,9 @@ import (
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/vm"

    // Used for genesis.
    msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
)

const LookbackNoLimit = api.LookbackNoLimit
@@ -53,7 +54,7 @@ type versionSpec struct {
type migration struct {
    upgrade MigrationFunc
    preMigrations []PreMigration
    cache *nv10.MemMigrationCache
    cache *nv15.MemMigrationCache
}

type Executor interface {
@@ -121,7 +122,7 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
    migration := &migration{
        upgrade: upgrade.Migration,
        preMigrations: upgrade.PreMigrations,
        cache: nv10.NewMemMigrationCache(),
        cache: nv15.NewMemMigrationCache(),
    }
    stateMigrations[upgrade.Height] = migration
}

@@ -1,3 +1,4 @@
//stm: #unit
package sub

import (
@@ -49,6 +50,7 @@ func TestFetchCidsWithDedup(t *testing.T) {
    }
    g := &getter{msgs}

    //stm: @CHAIN_INCOMING_FETCH_MESSAGES_BY_CID_001
    // the cids have a duplicate
    res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0]))

@@ -1,3 +1,4 @@
//stm: #unit
package chain_test

import (
@@ -22,6 +23,7 @@ import (
    "github.com/filecoin-project/go-state-types/abi"

    proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
    proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
@@ -102,7 +104,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
    ctx: ctx,
    cancel: cancel,

    mn: mocknet.New(ctx),
    mn: mocknet.New(),
    g: g,
    us: filcns.DefaultUpgradeSchedule(),
}
@@ -156,7 +158,7 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn
    ctx: ctx,
    cancel: cancel,

    mn: mocknet.New(ctx),
    mn: mocknet.New(),
    g: g,
    us: sched,
}
@@ -461,6 +463,8 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) {
}

func TestSyncSimple(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 50
    tu := prepSyncTest(t, H)

@@ -477,6 +481,8 @@ func TestSyncSimple(t *testing.T) {
}

func TestSyncMining(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 50
    tu := prepSyncTest(t, H)

@@ -499,6 +505,8 @@ func TestSyncMining(t *testing.T) {
}

func TestSyncBadTimestamp(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 50
    tu := prepSyncTest(t, H)

@@ -543,7 +551,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64
    return []uint64{1}, nil
}

func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) {
func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof2.PoStProof, error) {
    return []proof2.PoStProof{
        {
            PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1,
@@ -553,6 +561,8 @@ func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRan
}

func TestSyncBadWinningPoSt(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 15
    tu := prepSyncTest(t, H)

@@ -582,6 +592,9 @@ func (tu *syncTestUtil) loadChainToNode(to int) {
}

func TestSyncFork(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 10
    tu := prepSyncTest(t, H)

@@ -649,6 +662,9 @@ func TestSyncFork(t *testing.T) {
// A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X).
// We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected.
func TestDuplicateNonce(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
    //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
    H := 10
    tu := prepSyncTest(t, H)

@@ -703,6 +719,7 @@ func TestDuplicateNonce(t *testing.T) {

    var includedMsg cid.Cid
    var skippedMsg cid.Cid
    //stm: @CHAIN_STATE_SEARCH_MSG_001
    r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true)
    r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true)

@@ -744,6 +761,9 @@ func TestDuplicateNonce(t *testing.T) {
// This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
// be applied on the parent state.
func TestBadNonce(t *testing.T) {
    //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
    //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
    //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
H := 10
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
@ -791,6 +811,9 @@ func TestBadNonce(t *testing.T) {
|
||||
// One of the messages uses the sender's robust address, the other uses the ID address.
|
||||
// Such a block is invalid and should not sync.
|
||||
func TestMismatchedNoncesRobustID(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
|
||||
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
|
||||
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
|
||||
v5h := abi.ChainEpoch(4)
|
||||
tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
|
||||
|
||||
@ -803,6 +826,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Produce a message from the banker
|
||||
//stm: @CHAIN_STATE_LOOKUP_ID_001
|
||||
makeMsg := func(id bool) *types.SignedMessage {
|
||||
sender := tu.g.Banker()
|
||||
if id {
|
||||
@ -845,6 +869,9 @@ func TestMismatchedNoncesRobustID(t *testing.T) {
|
||||
// One of the messages uses the sender's robust address, the other uses the ID address.
|
||||
// Such a block is valid and should sync.
|
||||
func TestMatchedNoncesRobustID(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
|
||||
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
|
||||
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
|
||||
v5h := abi.ChainEpoch(4)
|
||||
tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h)
|
||||
|
||||
@ -857,6 +884,7 @@ func TestMatchedNoncesRobustID(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Produce a message from the banker with specified nonce
|
||||
//stm: @CHAIN_STATE_LOOKUP_ID_001
|
||||
makeMsg := func(n uint64, id bool) *types.SignedMessage {
|
||||
sender := tu.g.Banker()
|
||||
if id {
|
||||
@ -916,6 +944,8 @@ func runSyncBenchLength(b *testing.B, l int) {
|
||||
}
|
||||
|
||||
func TestSyncInputs(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001,
|
||||
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001
|
||||
H := 10
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
@ -943,6 +973,9 @@ func TestSyncInputs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncCheckpointHead(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
|
||||
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
|
||||
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
|
||||
H := 10
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
@ -962,6 +995,7 @@ func TestSyncCheckpointHead(t *testing.T) {
|
||||
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
|
||||
|
||||
tu.waitUntilSyncTarget(p1, a.TipSet())
|
||||
//stm: @CHAIN_SYNCER_CHECKPOINT_001
|
||||
tu.checkpointTs(p1, a.TipSet().Key())
|
||||
|
||||
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
|
||||
@ -981,15 +1015,20 @@ func TestSyncCheckpointHead(t *testing.T) {
|
||||
tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
|
||||
p1Head := tu.getHead(p1)
|
||||
require.True(tu.t, p1Head.Equals(a.TipSet()))
|
||||
//stm: @CHAIN_SYNCER_CHECK_BAD_001
|
||||
tu.assertBad(p1, b.TipSet())
|
||||
|
||||
// Should be able to switch forks.
|
||||
//stm: @CHAIN_SYNCER_CHECKPOINT_001
|
||||
tu.checkpointTs(p1, b.TipSet().Key())
|
||||
p1Head = tu.getHead(p1)
|
||||
require.True(tu.t, p1Head.Equals(b.TipSet()))
|
||||
}
|
||||
|
||||
func TestSyncCheckpointEarlierThanHead(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
|
||||
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
|
||||
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001
|
||||
H := 10
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
@ -1009,6 +1048,7 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
|
||||
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
|
||||
|
||||
tu.waitUntilSyncTarget(p1, a.TipSet())
|
||||
//stm: @CHAIN_SYNCER_CHECKPOINT_001
|
||||
tu.checkpointTs(p1, a1.TipSet().Key())
|
||||
|
||||
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
|
||||
@ -1028,15 +1068,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
|
||||
tu.waitUntilNodeHasTs(p1, b.TipSet().Key())
|
||||
p1Head := tu.getHead(p1)
|
||||
require.True(tu.t, p1Head.Equals(a.TipSet()))
|
||||
//stm: @CHAIN_SYNCER_CHECK_BAD_001
|
||||
tu.assertBad(p1, b.TipSet())
|
||||
|
||||
// Should be able to switch forks.
|
||||
//stm: @CHAIN_SYNCER_CHECKPOINT_001
|
||||
tu.checkpointTs(p1, b.TipSet().Key())
|
||||
p1Head = tu.getHead(p1)
|
||||
require.True(tu.t, p1Head.Equals(b.TipSet()))
|
||||
}
|
||||
|
||||
func TestInvalidHeight(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
|
||||
//stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
|
||||
H := 50
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
|
@ -3,6 +3,7 @@ package mock
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -24,15 +25,7 @@ func Address(i uint64) address.Address {
|
||||
}
|
||||
|
||||
func MkMessage(from, to address.Address, nonce uint64, w *wallet.LocalWallet) *types.SignedMessage {
|
||||
msg := &types.Message{
|
||||
To: to,
|
||||
From: from,
|
||||
Value: types.NewInt(1),
|
||||
Nonce: nonce,
|
||||
GasLimit: 1000000,
|
||||
GasFeeCap: types.NewInt(100),
|
||||
GasPremium: types.NewInt(1),
|
||||
}
|
||||
msg := UnsignedMessage(from, to, nonce)
|
||||
|
||||
sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
|
||||
if err != nil {
|
||||
@ -96,3 +89,35 @@ func TipSet(blks ...*types.BlockHeader) *types.TipSet {
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// RandomActorAddresses generates count new addresses using the provided seed, and returns them
func RandomActorAddresses(seed int64, count int) ([]*address.Address, error) {
    randAddrs := make([]*address.Address, count)
    source := rand.New(rand.NewSource(seed))
    for i := 0; i < count; i++ {
        bytes := make([]byte, 32)
        _, err := source.Read(bytes)
        if err != nil {
            return nil, err
        }

        addr, err := address.NewActorAddress(bytes)
        if err != nil {
            return nil, err
        }
        randAddrs[i] = &addr
    }
    return randAddrs, nil
}

func UnsignedMessage(from, to address.Address, nonce uint64) *types.Message {
    return &types.Message{
        To: to,
        From: from,
        Value: types.NewInt(1),
        Nonce: nonce,
        GasLimit: 1000000,
        GasFeeCap: types.NewInt(100),
        GasPremium: types.NewInt(1),
    }
}

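A quick note on how the refactored mock helpers compose (all names are taken from this diff): UnsignedMessage builds the bare types.Message, and MkMessage wraps the same message with a WalletSign call to produce a types.SignedMessage. A minimal sketch of the unsigned path:

    // Build an unsigned mock message between two deterministic mock addresses.
    msg := mock.UnsignedMessage(mock.Address(1), mock.Address(2), 0)
    fmt.Println(msg.Cid()) // this CID is what MkMessage signs via the wallet
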
@ -245,8 +245,8 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre
    return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
}

func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error {
    ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
func (ss *syscallShim) VerifyPoSt(info proof5.WindowPoStVerifyInfo) error {
    ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), info)
    if err != nil {
        return err
    }

@ -824,17 +824,6 @@ func (vm *VM) StateTree() types.StateTree {
    return vm.cstate
}

func (vm *VM) SetBlockHeight(ctx context.Context, h abi.ChainEpoch) error {
    vm.blockHeight = h
    ncirc, err := vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
    if err != nil {
        return err
    }

    vm.baseCircSupply = ncirc
    return nil
}

func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
    ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke")
    defer span.End()

128
cli/chain.go
@ -7,6 +7,7 @@ import (
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "os/exec"
    "path"
@ -67,6 +68,8 @@ var ChainHeadCmd = &cli.Command{
    Name: "head",
    Usage: "Print chain head",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -80,7 +83,7 @@ var ChainHeadCmd = &cli.Command{
        }

        for _, c := range head.Cids() {
            fmt.Println(c)
            afmt.Println(c)
        }
        return nil
    },
@ -97,6 +100,8 @@ var ChainGetBlock = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -124,7 +129,7 @@ var ChainGetBlock = &cli.Command{
                return err
            }

            fmt.Println(string(out))
            afmt.Println(string(out))
            return nil
        }

@ -163,9 +168,8 @@ var ChainGetBlock = &cli.Command{
            return err
        }

        fmt.Println(string(out))
        afmt.Println(string(out))
        return nil

    },
}

@ -182,6 +186,8 @@ var ChainReadObjCmd = &cli.Command{
    Usage: "Read the raw bytes of an object",
    ArgsUsage: "[objectCid]",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -199,7 +205,7 @@ var ChainReadObjCmd = &cli.Command{
            return err
        }

        fmt.Printf("%x\n", obj)
        afmt.Printf("%x\n", obj)
        return nil
    },
}
@ -215,6 +221,8 @@ var ChainDeleteObjCmd = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -236,7 +244,7 @@ var ChainDeleteObjCmd = &cli.Command{
            return err
        }

        fmt.Printf("Obj %s deleted\n", c.String())
        afmt.Printf("Obj %s deleted\n", c.String())
        return nil
    },
}
@ -257,6 +265,7 @@ var ChainStatObjCmd = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)
        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -282,8 +291,8 @@ var ChainStatObjCmd = &cli.Command{
            return err
        }

        fmt.Printf("Links: %d\n", stats.Links)
        fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
        afmt.Printf("Links: %d\n", stats.Links)
        afmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size)
        return nil
    },
}
@ -293,6 +302,8 @@ var ChainGetMsgCmd = &cli.Command{
    Usage: "Get and print a message by its cid",
    ArgsUsage: "[messageCid]",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        if !cctx.Args().Present() {
            return fmt.Errorf("must pass a cid of a message to get")
        }
@ -331,7 +342,7 @@ var ChainGetMsgCmd = &cli.Command{
            return err
        }

        fmt.Println(string(enc))
        afmt.Println(string(enc))
        return nil
    },
}
@ -406,6 +417,7 @@ var ChainInspectUsage = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)
        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -507,23 +519,23 @@ var ChainInspectUsage = &cli.Command{

        numRes := cctx.Int("num-results")

        fmt.Printf("Total Gas Limit: %d\n", sum)
        fmt.Printf("By Sender:\n")
        afmt.Printf("Total Gas Limit: %d\n", sum)
        afmt.Printf("By Sender:\n")
        for i := 0; i < numRes && i < len(senderVals); i++ {
            sv := senderVals[i]
            fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
            afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key])
        }
        fmt.Println()
        fmt.Printf("By Receiver:\n")
        afmt.Println()
        afmt.Printf("By Receiver:\n")
        for i := 0; i < numRes && i < len(destVals); i++ {
            sv := destVals[i]
            fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
            afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key])
        }
        fmt.Println()
        fmt.Printf("By Method:\n")
        afmt.Println()
        afmt.Printf("By Method:\n")
        for i := 0; i < numRes && i < len(methodVals); i++ {
            sv := methodVals[i]
            fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
            afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key])
        }

        return nil
@ -548,6 +560,7 @@ var ChainListCmd = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)
        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -595,7 +608,7 @@ var ChainListCmd = &cli.Command{
            tss = otss
            for i, ts := range tss {
                pbf := ts.Blocks()[0].ParentBaseFee
                fmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))
                afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))

                for _, b := range ts.Blocks() {
                    msgs, err := api.ChainGetBlockMessages(ctx, b.Cid())
@ -621,7 +634,7 @@ var ChainListCmd = &cli.Command{
                        avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs)))
                    }

                    fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
                    afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
                }
                if i < len(tss)-1 {
                    msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid())
@ -646,13 +659,13 @@ var ChainListCmd = &cli.Command{
                    gasEfficiency := 100 * float64(gasUsed) / float64(limitSum)
                    gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit)

                    fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
                    afmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity)
                }
                fmt.Println()
                afmt.Println()
            }
        } else {
            for i := len(tss) - 1; i >= 0; i-- {
                printTipSet(cctx.String("format"), tss[i])
                printTipSet(cctx.String("format"), tss[i], afmt)
            }
        }
        return nil
@ -707,6 +720,8 @@ var ChainGetCmd = &cli.Command{
- account-state
`,
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -725,7 +740,7 @@ var ChainGetCmd = &cli.Command{

            p = "/ipfs/" + ts.ParentState().String() + p
            if cctx.Bool("verbose") {
                fmt.Println(p)
                afmt.Println(p)
            }
        }

@ -740,7 +755,7 @@ var ChainGetCmd = &cli.Command{
            if err != nil {
                return err
            }
            fmt.Println(string(b))
            afmt.Println(string(b))
            return nil
        }

@ -782,7 +797,7 @@ var ChainGetCmd = &cli.Command{
        }

        if cbu == nil {
            fmt.Printf("%x", raw)
            afmt.Printf("%x", raw)
            return nil
        }

@ -794,7 +809,7 @@ var ChainGetCmd = &cli.Command{
        if err != nil {
            return err
        }
        fmt.Println(string(b))
        afmt.Println(string(b))
        return nil
    },
}
@ -878,7 +893,7 @@ func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error
    })
}

func printTipSet(format string, ts *types.TipSet) {
func printTipSet(format string, ts *types.TipSet, afmt *AppFmt) {
    format = strings.ReplaceAll(format, "<height>", fmt.Sprint(ts.Height()))
    format = strings.ReplaceAll(format, "<time>", time.Unix(int64(ts.MinTimestamp()), 0).Format(time.Stamp))
    blks := "[ "
@ -897,7 +912,7 @@ func printTipSet(format string, ts *types.TipSet) {
    format = strings.ReplaceAll(format, "<blocks>", blks)
    format = strings.ReplaceAll(format, "<weight>", fmt.Sprint(ts.Blocks()[0].ParentWeight))

    fmt.Println(format)
    afmt.Println(format)
}

var ChainBisectCmd = &cli.Command{
@ -918,6 +933,8 @@ var ChainBisectCmd = &cli.Command{
For special path elements see 'chain get' help
`,
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -961,7 +978,7 @@ var ChainBisectCmd = &cli.Command{
            }

            path := "/ipld/" + midTs.ParentState().String() + "/" + subPath
            fmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path)
            afmt.Printf("* Testing %d (%d - %d) (%s): ", mid, start, end, path)

            nd, err := api.ChainGetNode(ctx, path)
            if err != nil {
@ -988,32 +1005,32 @@ var ChainBisectCmd = &cli.Command{
                if strings.TrimSpace(out.String()) != "false" {
                    end = mid
                    highest = midTs
                    fmt.Println("true")
                    afmt.Println("true")
                } else {
                    start = mid
                    fmt.Printf("false (cli)\n")
                    afmt.Printf("false (cli)\n")
                }
            case *exec.ExitError:
                if len(serr.String()) > 0 {
                    fmt.Println("error")
                    afmt.Println("error")

                    fmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " "))
                    fmt.Println(string(b))
                    fmt.Println("<----")
                    afmt.Printf("> Command: %s\n---->\n", strings.Join(cctx.Args().Slice()[3:], " "))
                    afmt.Println(string(b))
                    afmt.Println("<----")
                    return xerrors.Errorf("error running bisect check: %s", serr.String())
                }

                start = mid
                fmt.Println("false")
                afmt.Println("false")
            default:
                return err
            }

            if start == end {
                if strings.TrimSpace(out.String()) == "true" {
                    fmt.Println(midTs.Height())
                    afmt.Println(midTs.Height())
                } else {
                    fmt.Println(prev)
                    afmt.Println(prev)
                }
                return nil
            }
@ -1058,7 +1075,7 @@ var ChainExportCmd = &cli.Command{
            return fmt.Errorf("\"recent-stateroots\" has to be greater than %d", build.Finality)
        }

        fi, err := os.Create(cctx.Args().First())
        fi, err := createExportFile(cctx.App, cctx.Args().First())
        if err != nil {
            return err
        }
@ -1118,6 +1135,8 @@ var SlashConsensusFault = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        srv, err := GetFullNodeServices(cctx)
        if err != nil {
            return err
@ -1222,7 +1241,7 @@ var SlashConsensusFault = &cli.Command{
            return err
        }

        fmt.Println(smsg.Cid())
        afmt.Println(smsg.Cid())

        return nil
    },
@ -1232,6 +1251,8 @@ var ChainGasPriceCmd = &cli.Command{
    Name: "gas-price",
    Usage: "Estimate gas prices",
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -1248,7 +1269,7 @@ var ChainGasPriceCmd = &cli.Command{
                return err
            }

            fmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est))
            afmt.Printf("%d blocks: %s (%s)\n", nblocks, est, types.FIL(est))
        }

        return nil
@ -1278,6 +1299,8 @@ var chainDecodeParamsCmd = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        api, closer, err := GetFullNodeAPI(cctx)
        if err != nil {
            return err
@ -1329,7 +1352,7 @@ var chainDecodeParamsCmd = &cli.Command{
            return err
        }

        fmt.Println(pstr)
        afmt.Println(pstr)

        return nil
    },
@ -1362,6 +1385,8 @@ var chainEncodeParamsCmd = &cli.Command{
        },
    },
    Action: func(cctx *cli.Context) error {
        afmt := NewAppFmt(cctx.App)

        if cctx.Args().Len() != 3 {
            return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments"))
        }
@ -1410,9 +1435,9 @@ var chainEncodeParamsCmd = &cli.Command{

        switch cctx.String("encoding") {
        case "base64", "b64":
            fmt.Println(base64.StdEncoding.EncodeToString(p))
            afmt.Println(base64.StdEncoding.EncodeToString(p))
        case "hex":
            fmt.Println(hex.EncodeToString(p))
            afmt.Println(hex.EncodeToString(p))
        default:
            return xerrors.Errorf("unknown encoding")
        }
@ -1420,3 +1445,16 @@ var chainEncodeParamsCmd = &cli.Command{
        return nil
    },
}

// createExportFile returns the export file handle from the app metadata, or creates a new file if it doesn't exist
func createExportFile(app *cli.App, path string) (io.WriteCloser, error) {
    if wc, ok := app.Metadata["export-file"]; ok {
        return wc.(io.WriteCloser), nil
    }

    fi, err := os.Create(path)
    if err != nil {
        return nil, err
    }
    return fi, nil
}

557
cli/chain_test.go
Normal file
@ -0,0 +1,557 @@
//stm: #cli
package cli

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "regexp"
    "strings"
    "testing"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/lotus/api"
    types "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/mock"
    "github.com/filecoin-project/specs-actors/v7/actors/builtin"
    "github.com/golang/mock/gomock"
    cid "github.com/ipfs/go-cid"
    "github.com/stretchr/testify/assert"
)

func TestChainHead(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainHeadCmd))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    ts := mock.TipSet(mock.MkBlock(nil, 0, 0))
    gomock.InOrder(
        mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
    )

    //stm: @CLI_CHAIN_HEAD_001
    err := app.Run([]string{"chain", "head"})
    assert.NoError(t, err)

    assert.Regexp(t, regexp.MustCompile(ts.Cids()[0].String()), buf.String())
}

func TestGetBlock(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetBlock))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    block := mock.MkBlock(nil, 0, 0)
    blockMsgs := api.BlockMessages{}

    gomock.InOrder(
        mockApi.EXPECT().ChainGetBlock(ctx, block.Cid()).Return(block, nil),
        mockApi.EXPECT().ChainGetBlockMessages(ctx, block.Cid()).Return(&blockMsgs, nil),
        mockApi.EXPECT().ChainGetParentMessages(ctx, block.Cid()).Return([]api.Message{}, nil),
        mockApi.EXPECT().ChainGetParentReceipts(ctx, block.Cid()).Return([]*types.MessageReceipt{}, nil),
    )

    //stm: @CLI_CHAIN_GET_BLOCK_001
    err := app.Run([]string{"chain", "getblock", block.Cid().String()})
    assert.NoError(t, err)

    // expected output format
    out := struct {
        types.BlockHeader
        BlsMessages []*types.Message
        SecpkMessages []*types.SignedMessage
        ParentReceipts []*types.MessageReceipt
        ParentMessages []cid.Cid
    }{}

    err = json.Unmarshal(buf.Bytes(), &out)
    assert.NoError(t, err)

    assert.True(t, block.Cid().Equals(out.Cid()))
}

func TestReadObj(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainReadObjCmd))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    block := mock.MkBlock(nil, 0, 0)
    obj := new(bytes.Buffer)
    err := block.MarshalCBOR(obj)
    assert.NoError(t, err)

    gomock.InOrder(
        mockApi.EXPECT().ChainReadObj(ctx, block.Cid()).Return(obj.Bytes(), nil),
    )

    //stm: @CLI_CHAIN_READ_OBJECT_001
    err = app.Run([]string{"chain", "read-obj", block.Cid().String()})
    assert.NoError(t, err)

    assert.Equal(t, buf.String(), fmt.Sprintf("%x\n", obj.Bytes()))
}

func TestChainDeleteObj(t *testing.T) {
    cmd := WithCategory("chain", ChainDeleteObjCmd)
    block := mock.MkBlock(nil, 0, 0)

    // given no force flag, it should return an error and no API calls should be made
    t.Run("no-really-do-it", func(t *testing.T) {
        app, _, _, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        //stm: @CLI_CHAIN_DELETE_OBJECT_002
        err := app.Run([]string{"chain", "delete-obj", block.Cid().String()})
        assert.Error(t, err)
    })

    // given a force flag, it calls API delete
    t.Run("really-do-it", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainDeleteObj(ctx, block.Cid()).Return(nil),
        )

        //stm: @CLI_CHAIN_DELETE_OBJECT_001
        err := app.Run([]string{"chain", "delete-obj", "--really-do-it=true", block.Cid().String()})
        assert.NoError(t, err)

        assert.Contains(t, buf.String(), block.Cid().String())
    })
}

func TestChainStatObj(t *testing.T) {
    cmd := WithCategory("chain", ChainStatObjCmd)
    block := mock.MkBlock(nil, 0, 0)
    stat := api.ObjStat{Size: 123, Links: 321}

    checkOutput := func(buf *bytes.Buffer) {
        out := buf.String()
        outSplit := strings.Split(out, "\n")

        assert.Contains(t, outSplit[0], fmt.Sprintf("%d", stat.Links))
        assert.Contains(t, outSplit[1], fmt.Sprintf("%d", stat.Size))
    }

    // given no --base flag, it calls ChainStatObj with base=cid.Undef
    t.Run("no-base", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), cid.Undef).Return(stat, nil),
        )

        //stm: @CLI_CHAIN_STAT_OBJECT_001
        err := app.Run([]string{"chain", "stat-obj", block.Cid().String()})
        assert.NoError(t, err)

        checkOutput(buf)
    })

    // given a --base flag, it calls ChainStatObj with that base
    t.Run("base", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainStatObj(ctx, block.Cid(), block.Cid()).Return(stat, nil),
        )

        //stm: @CLI_CHAIN_STAT_OBJECT_002
        err := app.Run([]string{"chain", "stat-obj", fmt.Sprintf("-base=%s", block.Cid().String()), block.Cid().String()})
        assert.NoError(t, err)

        checkOutput(buf)
    })
}

func TestChainGetMsg(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGetMsgCmd))
    defer done()

    addrs, err := mock.RandomActorAddresses(12345, 2)
    assert.NoError(t, err)

    from := addrs[0]
    to := addrs[1]

    msg := mock.UnsignedMessage(*from, *to, 0)

    obj := new(bytes.Buffer)
    err = msg.MarshalCBOR(obj)
    assert.NoError(t, err)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    gomock.InOrder(
        mockApi.EXPECT().ChainReadObj(ctx, msg.Cid()).Return(obj.Bytes(), nil),
    )

    //stm: @CLI_CHAIN_GET_MESSAGE_001
    err = app.Run([]string{"chain", "getmessage", msg.Cid().String()})
    assert.NoError(t, err)

    var out types.Message
    err = json.Unmarshal(buf.Bytes(), &out)
    assert.NoError(t, err)

    assert.Equal(t, *msg, out)
}

func TestSetHead(t *testing.T) {
    cmd := WithCategory("chain", ChainSetHeadCmd)
    genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
    ts := mock.TipSet(mock.MkBlock(genesis, 1, 0))
    epoch := abi.ChainEpoch(uint64(0))

    // given the -genesis flag, resets head to genesis ignoring the provided ts positional argument
    t.Run("genesis", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainGetGenesis(ctx).Return(genesis, nil),
            mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
        )

        //stm: @CLI_CHAIN_SET_HEAD_003
        err := app.Run([]string{"chain", "sethead", "-genesis=true", ts.Key().String()})
        assert.NoError(t, err)
    })

    // given the -epoch flag, resets head to given epoch, ignoring the provided ts positional argument
    t.Run("epoch", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK).Return(genesis, nil),
            mockApi.EXPECT().ChainSetHead(ctx, genesis.Key()).Return(nil),
        )

        //stm: @CLI_CHAIN_SET_HEAD_002
        err := app.Run([]string{"chain", "sethead", fmt.Sprintf("-epoch=%s", epoch), ts.Key().String()})
        assert.NoError(t, err)
    })

    // given no flag, resets the head to given tipset key
    t.Run("default", func(t *testing.T) {
        app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainGetBlock(ctx, ts.Key().Cids()[0]).Return(ts.Blocks()[0], nil),
            mockApi.EXPECT().ChainSetHead(ctx, ts.Key()).Return(nil),
        )

        //stm: @CLI_CHAIN_SET_HEAD_001
        err := app.Run([]string{"chain", "sethead", ts.Key().Cids()[0].String()})
        assert.NoError(t, err)
    })
}

func TestInspectUsage(t *testing.T) {
    cmd := WithCategory("chain", ChainInspectUsage)
    ts := mock.TipSet(mock.MkBlock(nil, 0, 0))

    addrs, err := mock.RandomActorAddresses(12345, 2)
    assert.NoError(t, err)

    from := addrs[0]
    to := addrs[1]

    msg := mock.UnsignedMessage(*from, *to, 0)
    msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}

    actor := &types.Actor{
        Code: builtin.StorageMarketActorCodeID,
        Nonce: 0,
        Balance: big.NewInt(1000000000),
    }

    t.Run("default", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
            mockApi.EXPECT().ChainGetParentMessages(ctx, ts.Blocks()[0].Cid()).Return(msgs, nil),
            mockApi.EXPECT().ChainGetTipSet(ctx, ts.Parents()).Return(nil, nil),
            mockApi.EXPECT().StateGetActor(ctx, *to, ts.Key()).Return(actor, nil),
        )

        //stm: @CLI_CHAIN_INSPECT_USAGE_001
        err := app.Run([]string{"chain", "inspect-usage"})
        assert.NoError(t, err)

        out := buf.String()

        // output is plain text, so we match on substrings
        assert.Contains(t, out, from.String())
        assert.Contains(t, out, to.String())
        // check for gas by sender
        assert.Contains(t, out, "By Sender")
        // check for gas by method
        assert.Contains(t, out, "By Method:\nSend")
    })
}

func TestChainList(t *testing.T) {
    cmd := WithCategory("chain", ChainListCmd)
    genesis := mock.TipSet(mock.MkBlock(nil, 0, 0))
    blk := mock.MkBlock(genesis, 0, 0)
    blk.Height = 1
    head := mock.TipSet(blk)

    addrs, err := mock.RandomActorAddresses(12345, 2)
    assert.NoError(t, err)

    from := addrs[0]
    to := addrs[1]

    msg := mock.UnsignedMessage(*from, *to, 0)
    msgs := []api.Message{{Cid: msg.Cid(), Message: msg}}
    blockMsgs := &api.BlockMessages{}
    receipts := []*types.MessageReceipt{}

    t.Run("default", func(t *testing.T) {
        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // the same methods get mocked multiple times because they are called in a loop over all tipsets (2 in this case)
        gomock.InOrder(
            mockApi.EXPECT().ChainHead(ctx).Return(head, nil),
            mockApi.EXPECT().ChainGetTipSet(ctx, head.Parents()).Return(genesis, nil),
            mockApi.EXPECT().ChainGetBlockMessages(ctx, genesis.Blocks()[0].Cid()).Return(blockMsgs, nil),
            mockApi.EXPECT().ChainGetParentMessages(ctx, head.Blocks()[0].Cid()).Return(msgs, nil),
            mockApi.EXPECT().ChainGetParentReceipts(ctx, head.Blocks()[0].Cid()).Return(receipts, nil),
            mockApi.EXPECT().ChainGetBlockMessages(ctx, head.Blocks()[0].Cid()).Return(blockMsgs, nil),
        )

        //stm: CLI_CHAIN_LIST_001
        err := app.Run([]string{"chain", "love", "--gas-stats=true"}) // chain is love ❤️
        assert.NoError(t, err)

        out := buf.String()

        // should print out 2 blocks, indexed with 0: and 1:
        assert.Contains(t, out, "0:")
        assert.Contains(t, out, "1:")
    })
}

func TestChainGet(t *testing.T) {
    blk := mock.MkBlock(nil, 0, 0)
    ts := mock.TipSet(blk)
    cmd := WithCategory("chain", ChainGetCmd)

    // given no -as-type flag & ipfs prefix, should print object as JSON if it's marshalable
    t.Run("ipfs", func(t *testing.T) {
        path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())

        app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
        defer done()

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        gomock.InOrder(
            mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
        )

        //stm: @CLI_CHAIN_GET_001
        err := app.Run([]string{"chain", "get", path})
        assert.NoError(t, err)

        var out types.BlockHeader
        err = json.Unmarshal(buf.Bytes(), &out)
        assert.NoError(t, err)
        assert.Equal(t, *blk, out)
    })

    // given no -as-type flag & pstate prefix, should traverse from head.ParentStateRoot and print JSON if it's marshalable
t.Run("pstate", func(t *testing.T) {
|
||||
p1 := "/pstate"
|
||||
p2 := fmt.Sprintf("/ipfs/%s", ts.ParentState().String())
|
||||
|
||||
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
|
||||
mockApi.EXPECT().ChainGetNode(ctx, p2).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_CHAIN_GET_002
|
||||
err := app.Run([]string{"chain", "get", p1})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var out types.BlockHeader
|
||||
err = json.Unmarshal(buf.Bytes(), &out)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, *blk, out)
|
||||
})
|
||||
|
||||
// given an unknown -as-type value, return an error
|
||||
t.Run("unknown-type", func(t *testing.T) {
|
||||
app, mockApi, _, done := NewMockAppWithFullAPI(t, cmd)
|
||||
defer done()
|
||||
|
||||
path := fmt.Sprintf("/ipfs/%s", blk.Cid().String())
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk.Cid(), Obj: blk}, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_CHAIN_GET_004
|
||||
err := app.Run([]string{"chain", "get", "-as-type=foo", path})
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestChainBisect(t *testing.T) {
|
||||
blk1 := mock.MkBlock(nil, 0, 0)
|
||||
blk1.Height = 0
|
||||
ts1 := mock.TipSet(blk1)
|
||||
|
||||
blk2 := mock.MkBlock(ts1, 0, 0)
|
||||
blk2.Height = 1
|
||||
ts2 := mock.TipSet(blk2)
|
||||
|
||||
subpath := "whatever/its/mocked"
|
||||
minHeight := uint64(0)
|
||||
maxHeight := uint64(1)
|
||||
shell := "echo"
|
||||
|
||||
path := fmt.Sprintf("/ipld/%s/%s", ts2.ParentState(), subpath)
|
||||
|
||||
cmd := WithCategory("chain", ChainBisectCmd)
|
||||
|
||||
app, mockApi, buf, done := NewMockAppWithFullAPI(t, cmd)
|
||||
defer done()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
gomock.InOrder(
|
||||
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), types.EmptyTSK).Return(ts2, nil),
|
||||
mockApi.EXPECT().ChainGetTipSetByHeight(ctx, abi.ChainEpoch(maxHeight), ts2.Key()).Return(ts2, nil),
|
||||
mockApi.EXPECT().ChainGetNode(ctx, path).Return(&api.IpldObject{Cid: blk2.Cid(), Obj: blk2}, nil),
|
||||
)
|
||||
|
||||
//stm: @CLI_CHAIN_BISECT_001
|
||||
err := app.Run([]string{"chain", "bisect", fmt.Sprintf("%d", minHeight), fmt.Sprintf("%d", maxHeight), subpath, shell})
|
||||
assert.NoError(t, err)
|
||||
|
||||
out := buf.String()
|
||||
assert.Contains(t, out, path)
|
||||
}
|
||||
|
||||
func TestChainExport(t *testing.T) {
|
||||
app, mockApi, _, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainExportCmd))
|
||||
defer done()
|
||||
|
||||
    // export writes to a file; it is mocked here so the test has no side effects
    mockFile := mockExportFile{new(bytes.Buffer)}
    app.Metadata["export-file"] = mockFile

    blk := mock.MkBlock(nil, 0, 0)
    ts := mock.TipSet(blk)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    export := make(chan []byte, 2)
    expBytes := []byte("whatever")
    export <- expBytes
    export <- []byte{} // empty slice means export is complete
    close(export)

    gomock.InOrder(
        mockApi.EXPECT().ChainHead(ctx).Return(ts, nil),
        mockApi.EXPECT().ChainExport(ctx, abi.ChainEpoch(0), false, ts.Key()).Return(export, nil),
    )

    //stm: @CLI_CHAIN_EXPORT_001
    err := app.Run([]string{"chain", "export", "whatever.car"})
    assert.NoError(t, err)

    assert.Equal(t, expBytes, mockFile.Bytes())
}

func TestChainGasPrice(t *testing.T) {
    app, mockApi, buf, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainGasPriceCmd))
    defer done()

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // the implementation calls gas estimation with various block counts,
    // so we count the mocked calls and expect that many result lines printed
    calls := 0
    mockApi.
        EXPECT().
        GasEstimateGasPremium(ctx, gomock.Any(), builtin.SystemActorAddr, int64(10000), types.EmptyTSK).
        Return(big.NewInt(0), nil).
        AnyTimes().
        Do(func(a, b, c, d, e interface{}) { // looks funny, but we don't care about args here, just counting
            calls++
        })

    //stm: @CLI_CHAIN_GAS_PRICE_001
    err := app.Run([]string{"chain", "gas-price"})
    assert.NoError(t, err)

    lines := strings.Split(strings.Trim(buf.String(), "\n"), "\n")
    assert.Equal(t, calls, len(lines))
}

type mockExportFile struct {
    *bytes.Buffer
}

func (mef mockExportFile) Close() error {
    return nil
}

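Worth noting (all names here come from this diff): TestChainExport above can avoid touching the filesystem because createExportFile, added in cli/chain.go, checks app.Metadata["export-file"] before falling back to os.Create. A minimal sketch of reusing that hook in a test:

    app, _, _, done := NewMockAppWithFullAPI(t, WithCategory("chain", ChainExportCmd))
    defer done()

    // Any io.WriteCloser works here; nothing is written to disk.
    app.Metadata["export-file"] = mockExportFile{new(bytes.Buffer)}
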
@ -667,6 +667,8 @@ uiLoop:

        state = "miner"
    case "miner":
        maddrs = maddrs[:0]
        ask = ask[:0]
        afmt.Print("Miner Addresses (f0.. f0..), none to find: ")

        _maddrsStr, _, err := rl.ReadLine()
@ -802,7 +804,8 @@ uiLoop:

        dealCount, err = strconv.ParseInt(string(dealcStr), 10, 64)
        if err != nil {
            return err
            printErr(xerrors.Errorf("reading deal count: invalid number"))
            continue
        }

        color.Blue(".. Picking miners")
@ -859,12 +862,13 @@ uiLoop:

            a, err := api.ClientQueryAsk(ctx, *mi.PeerId, maddr)
            if err != nil {
                printErr(xerrors.Errorf("failed to query ask: %w", err))
                printErr(xerrors.Errorf("failed to query ask for miner %s: %w", maddr.String(), err))
                state = "miner"
                continue uiLoop
            }

            ask = append(ask, *a)

        }

        // TODO: run more validation

32
cli/mocks_test.go
Normal file
@ -0,0 +1,32 @@
package cli

import (
    "bytes"
    "testing"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/api/mocks"
    "github.com/golang/mock/gomock"
    ucli "github.com/urfave/cli/v2"
)

// NewMockAppWithFullAPI returns a gomock-ed CLI app used for unit tests
// see cli/util/api.go:GetFullNodeAPI for mock API injection
func NewMockAppWithFullAPI(t *testing.T, cmd *ucli.Command) (*ucli.App, *mocks.MockFullNode, *bytes.Buffer, func()) {
    app := ucli.NewApp()
    app.Commands = ucli.Commands{cmd}
    app.Setup()

    // create and inject the mock API into app Metadata
    ctrl := gomock.NewController(t)
    mockFullNode := mocks.NewMockFullNode(ctrl)
    var fullNode api.FullNode = mockFullNode
    app.Metadata["test-full-api"] = fullNode

    // this will only work if the implementation uses the app.Writer,
    // if it uses fmt.*, it has to be refactored
    buf := &bytes.Buffer{}
    app.Writer = buf

    return app, mockFullNode, buf, ctrl.Finish
}

102
cli/net.go
@ -36,6 +36,8 @@ var NetCmd = &cli.Command{
        NetReachability,
        NetBandwidthCmd,
        NetBlockCmd,
        NetStatCmd,
        NetLimitCmd,
    },
}

@ -606,3 +608,103 @@ var NetBlockListCmd = &cli.Command{
        return nil
    },
}

var NetStatCmd = &cli.Command{
    Name: "stat",
    Usage: "Report resource usage for a scope",
    ArgsUsage: "scope",
    Description: `Report resource usage for a scope.

The scope can be one of the following:
- system -- reports the system aggregate resource usage.
- transient -- reports the transient resource usage.
- svc:<service> -- reports the resource usage of a specific service.
- proto:<proto> -- reports the resource usage of a specific protocol.
- peer:<peer> -- reports the resource usage of a specific peer.
- all -- reports the resource usage for all currently active scopes.
`,
    Action: func(cctx *cli.Context) error {
        api, closer, err := GetAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := ReqContext(cctx)

        args := cctx.Args().Slice()
        if len(args) != 1 {
            return xerrors.Errorf("must specify exactly one scope")
        }
        scope := args[0]

        result, err := api.NetStat(ctx, scope)
        if err != nil {
            return err
        }

        enc := json.NewEncoder(os.Stdout)
        return enc.Encode(result)
    },
}

var NetLimitCmd = &cli.Command{
    Name: "limit",
    Usage: "Get or set resource limits for a scope",
    ArgsUsage: "scope [limit]",
    Description: `Get or set resource limits for a scope.

The scope can be one of the following:
- system -- reports the system aggregate resource usage.
- transient -- reports the transient resource usage.
- svc:<service> -- reports the resource usage of a specific service.
- proto:<proto> -- reports the resource usage of a specific protocol.
- peer:<peer> -- reports the resource usage of a specific peer.

The limit is json-formatted, with the same structure as the limits file.
`,
    Flags: []cli.Flag{
        &cli.BoolFlag{
            Name: "set",
            Usage: "set the limit for a scope",
        },
    },
    Action: func(cctx *cli.Context) error {
        api, closer, err := GetAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := ReqContext(cctx)
        args := cctx.Args().Slice()

        if cctx.Bool("set") {
            if len(args) != 2 {
                return xerrors.Errorf("must specify exactly a scope and a limit")
            }
            scope := args[0]
            limitStr := args[1]

            var limit atypes.NetLimit
            err := json.Unmarshal([]byte(limitStr), &limit)
            if err != nil {
                return xerrors.Errorf("error decoding limit: %w", err)
            }

            return api.NetSetLimit(ctx, scope, limit)

        }

        if len(args) != 1 {
            return xerrors.Errorf("must specify exactly one scope")
        }
        scope := args[0]

        result, err := api.NetLimit(ctx, scope)
        if err != nil {
            return err
        }

        enc := json.NewEncoder(os.Stdout)
        return enc.Encode(result)
    },
}

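For illustration only (not part of the diff): the --set path above simply JSON-decodes the positional argument into atypes.NetLimit and forwards it to api.NetSetLimit, so a programmatic equivalent looks like the sketch below. The "Memory" key is an assumed example of the limits-file structure; this diff does not define NetLimit's fields.

    // Hypothetical CLI invocation:
    //   lotus net limit --set system '{"Memory": 1073741824}'
    var limit atypes.NetLimit
    if err := json.Unmarshal([]byte(`{"Memory": 1073741824}`), &limit); err != nil {
        return xerrors.Errorf("error decoding limit: %w", err)
    }
    return api.NetSetLimit(ctx, "system", limit)
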
@ -223,6 +223,11 @@ func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error)
|
||||
}
|
||||
|
||||
func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
// use the mocked API in CLI unit tests, see cli/mocks_test.go for mock definition
|
||||
if mock, ok := ctx.App.Metadata["test-full-api"]; ok {
|
||||
return &v0api.WrapperV1Full{FullNode: mock.(v1api.FullNode)}, func() {}, nil
|
||||
}
|
||||
|
||||
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
|
||||
return &v0api.WrapperV1Full{FullNode: tn.(v1api.FullNode)}, func() {}, nil
|
||||
}
|
||||
|
@ -86,9 +86,10 @@ func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) {
|
||||
}, &svi)
|
||||
}
|
||||
|
||||
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) {
|
||||
func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) {
|
||||
return cv.backend.VerifyWinningPoSt(ctx, info)
|
||||
}
|
||||
|
||||
func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) {
|
||||
return cv.withCache(func() (bool, error) {
|
||||
return cv.backend.VerifyWindowPoSt(ctx, info)
|
||||
|
@ -12,6 +12,8 @@ import (
|
||||
"time"
|
||||
|
||||
saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
|
||||
"github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
|
||||
saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
@ -260,7 +262,8 @@ var sealBenchCmd = &cli.Command{
|
||||
sectorNumber := c.Int("num-sectors")
|
||||
|
||||
var sealTimings []SealingResult
|
||||
var sealedSectors []saproof2.SectorInfo
|
||||
var extendedSealedSectors []saproof7.ExtendedSectorInfo
|
||||
var sealedSectors []saproof7.SectorInfo
|
||||
|
||||
if robench == "" {
|
||||
var err error
|
||||
@ -269,7 +272,7 @@ var sealBenchCmd = &cli.Command{
|
||||
PreCommit2: 1,
|
||||
Commit: 1,
|
||||
}
|
||||
sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
|
||||
sealTimings, extendedSealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to run seals: %w", err)
|
||||
}
|
||||
@ -296,7 +299,13 @@ var sealBenchCmd = &cli.Command{
|
||||
}
|
||||
|
||||
for _, s := range genm.Sectors {
|
||||
sealedSectors = append(sealedSectors, saproof2.SectorInfo{
|
||||
extendedSealedSectors = append(extendedSealedSectors, saproof7.ExtendedSectorInfo{
|
||||
SealedCID: s.CommR,
|
||||
SectorNumber: s.SectorID,
|
||||
SealProof: s.ProofType,
|
||||
SectorKey: nil,
|
||||
})
|
||||
sealedSectors = append(sealedSectors, proof.SectorInfo{
|
||||
SealedCID: s.CommR,
|
||||
SectorNumber: s.SectorID,
|
||||
SealProof: s.ProofType,
|
||||
@ -325,20 +334,20 @@ var sealBenchCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(sealedSectors)))
|
||||
fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(extendedSealedSectors)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
candidates := make([]saproof2.SectorInfo, len(fcandidates))
|
||||
xcandidates := make([]saproof7.ExtendedSectorInfo, len(fcandidates))
|
||||
for i, fcandidate := range fcandidates {
|
||||
candidates[i] = sealedSectors[fcandidate]
|
||||
xcandidates[i] = extendedSealedSectors[fcandidate]
|
||||
}
|
||||
|
||||
gencandidates := time.Now()
|
||||
|
||||
log.Info("computing winning post snark (cold)")
|
||||
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
|
||||
proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -346,14 +355,23 @@ var sealBenchCmd = &cli.Command{
|
||||
winningpost1 := time.Now()
|
||||
|
||||
log.Info("computing winning post snark (hot)")
|
||||
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:])
|
||||
proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
candidates := make([]saproof7.SectorInfo, len(xcandidates))
|
||||
for i, xsi := range xcandidates {
|
||||
candidates[i] = saproof7.SectorInfo{
|
||||
SealedCID: xsi.SealedCID,
|
||||
SectorNumber: xsi.SectorNumber,
|
||||
SealProof: xsi.SealProof,
|
||||
}
|
||||
}
|
||||
|
||||
winnningpost2 := time.Now()
|
||||
|
||||
pvi1 := saproof2.WinningPoStVerifyInfo{
|
||||
pvi1 := saproof7.WinningPoStVerifyInfo{
|
||||
Randomness: abi.PoStRandomness(challenge[:]),
|
||||
Proofs: proof1,
|
||||
ChallengedSectors: candidates,
|
||||
@ -369,7 +387,7 @@ var sealBenchCmd = &cli.Command{
|
||||
|
||||
verifyWinningPost1 := time.Now()
|
||||
|
||||
pvi2 := saproof2.WinningPoStVerifyInfo{
|
||||
pvi2 := saproof7.WinningPoStVerifyInfo{
|
||||
Randomness: abi.PoStRandomness(challenge[:]),
|
||||
Proofs: proof2,
|
||||
ChallengedSectors: candidates,
|
||||
@ -386,7 +404,7 @@ var sealBenchCmd = &cli.Command{
|
||||
verifyWinningPost2 := time.Now()
|
||||
|
||||
log.Info("computing window post snark (cold)")
|
||||
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
|
||||
wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -394,7 +412,7 @@ var sealBenchCmd = &cli.Command{
|
||||
windowpost1 := time.Now()
|
||||
|
||||
log.Info("computing window post snark (hot)")
|
||||
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:])
|
||||
wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -502,10 +520,10 @@ type ParCfg struct {
|
||||
Commit int
|
||||
}
|
||||
|
||||
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) {
|
||||
func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof7.ExtendedSectorInfo, error) {
|
||||
var pieces []abi.PieceInfo
|
||||
sealTimings := make([]SealingResult, numSectors)
|
||||
sealedSectors := make([]saproof2.SectorInfo, numSectors)
|
||||
sealedSectors := make([]saproof7.ExtendedSectorInfo, numSectors)
|
||||
|
||||
preCommit2Sema := make(chan struct{}, par.PreCommit2)
|
||||
commitSema := make(chan struct{}, par.Commit)
|
||||
@ -579,10 +597,11 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
|
||||
precommit2 := time.Now()
|
||||
<-preCommit2Sema
|
||||
|
||||
sealedSectors[i] = saproof2.SectorInfo{
|
||||
sealedSectors[i] = saproof7.ExtendedSectorInfo{
|
||||
SealProof: sid.ProofType,
|
||||
SectorNumber: i,
|
||||
SealedCID: cids.Sealed,
|
||||
SectorKey: nil,
|
||||
}
|
||||
|
||||
seed := lapi.SealSeed{
|
||||
|
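Context for the `lotus-bench` hunks above: with OhSnap, PoSt generation moves from the v2 proof types to v7, where `saproof7.ExtendedSectorInfo` carries an extra optional `SectorKey` CID for snap-updated sectors, while verification still consumes plain `SectorInfo`. That is why the diff builds `candidates` from `xcandidates` before verifying. A minimal standalone sketch of that conversion (the helper name `toSectorInfos` is ours, not from the diff):

```go
package bench

import (
	saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
)

// toSectorInfos strips the SectorKey field from each ExtendedSectorInfo,
// producing the plain SectorInfo slice that WinningPoStVerifyInfo expects.
func toSectorInfos(xs []saproof7.ExtendedSectorInfo) []saproof7.SectorInfo {
	out := make([]saproof7.SectorInfo, 0, len(xs))
	for _, xsi := range xs {
		out = append(out, saproof7.SectorInfo{
			SealedCID:    xsi.SealedCID,
			SectorNumber: xsi.SectorNumber,
			SealProof:    xsi.SealProof,
		})
	}
	return out
}
```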
@ -126,7 +126,7 @@ func infoCmdAct(cctx *cli.Context) error {
 
 	alerts, err := minerApi.LogAlerts(ctx)
 	if err != nil {
-		return xerrors.Errorf("getting alerts: %w", err)
+		fmt.Printf("ERROR: getting alerts: %s\n", err)
 	}
 
 	activeAlerts := make([]alerting.Alert, 0)
@ -466,10 +466,13 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
 var stateList = []stateMeta{
 	{col: 39, state: "Total"},
 	{col: color.FgGreen, state: sealing.Proving},
+	{col: color.FgGreen, state: sealing.UpdateActivating},
 
 	{col: color.FgBlue, state: sealing.Empty},
 	{col: color.FgBlue, state: sealing.WaitDeals},
 	{col: color.FgBlue, state: sealing.AddPiece},
+	{col: color.FgBlue, state: sealing.SnapDealsWaitDeals},
+	{col: color.FgBlue, state: sealing.SnapDealsAddPiece},
 
 	{col: color.FgRed, state: sealing.UndefinedSectorState},
 	{col: color.FgYellow, state: sealing.Packing},
@ -488,6 +491,13 @@ var stateList = []stateMeta{
 	{col: color.FgYellow, state: sealing.SubmitCommitAggregate},
 	{col: color.FgYellow, state: sealing.CommitAggregateWait},
 	{col: color.FgYellow, state: sealing.FinalizeSector},
+	{col: color.FgYellow, state: sealing.SnapDealsPacking},
+	{col: color.FgYellow, state: sealing.UpdateReplica},
+	{col: color.FgYellow, state: sealing.ProveReplicaUpdate},
+	{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
+	{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
+	{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
+	{col: color.FgYellow, state: sealing.ReleaseSectorKey},
 
 	{col: color.FgCyan, state: sealing.Terminating},
 	{col: color.FgCyan, state: sealing.TerminateWait},
@ -495,6 +505,7 @@ var stateList = []stateMeta{
 	{col: color.FgCyan, state: sealing.TerminateFailed},
 	{col: color.FgCyan, state: sealing.Removing},
 	{col: color.FgCyan, state: sealing.Removed},
+	{col: color.FgCyan, state: sealing.AbortUpgrade},
 
 	{col: color.FgRed, state: sealing.FailedUnrecoverable},
 	{col: color.FgRed, state: sealing.AddPieceFailed},
@ -512,6 +523,10 @@ var stateList = []stateMeta{
 	{col: color.FgRed, state: sealing.RemoveFailed},
 	{col: color.FgRed, state: sealing.DealsExpired},
 	{col: color.FgRed, state: sealing.RecoverDealIDs},
+	{col: color.FgRed, state: sealing.SnapDealsAddPieceFailed},
+	{col: color.FgRed, state: sealing.SnapDealsDealsExpired},
+	{col: color.FgRed, state: sealing.ReplicaUpdateFailed},
+	{col: color.FgRed, state: sealing.ReleaseSectorKeyFailed},
 }
 
 func init() {
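The `stateList` entries above drive the colored sector-state summary in `lotus-miner info`: blue for pre-sealing, yellow for in-progress, cyan for terminal, red for failures. A state that is missing from this list is not counted, which is why every new SnapDeals sealing state has to be registered here. A hedged sketch of how such a table is typically indexed into the `stateOrder` lookup (the `stateMeta` field names are assumed from usage, not shown in this diff):

```go
package info

import (
	"github.com/fatih/color"

	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

// stateMeta pairs a sealing state with its display color; the i field is
// assumed to record the state's position within stateList.
type stateMeta struct {
	i     int
	col   color.Attribute
	state sealing.SectorState
}

// buildStateOrder indexes stateList so states always print in a stable order.
func buildStateOrder(stateList []stateMeta) map[sealing.SectorState]stateMeta {
	order := make(map[sealing.SectorState]stateMeta, len(stateList))
	for i, state := range stateList {
		order[state.state] = stateMeta{i: i, col: state.col}
	}
	return order
}
```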
@ -96,6 +96,11 @@ var infoAllCmd = &cli.Command{
 			fmt.Println("ERROR: ", err)
 		}
 
+		fmt.Println("\n#: Storage Locks")
+		if err := storageLocks.Action(cctx); err != nil {
+			fmt.Println("ERROR: ", err)
+		}
+
 		fmt.Println("\n#: Sched Diag")
 		if err := sealingSchedDiagCmd.Action(cctx); err != nil {
 			fmt.Println("ERROR: ", err)
@ -192,6 +197,11 @@ var infoAllCmd = &cli.Command{
 			fmt.Println("ERROR: ", err)
 		}
 
+		fmt.Println("\n#: Storage Sector List")
+		if err := storageListSectorsCmd.Action(cctx); err != nil {
+			fmt.Println("ERROR: ", err)
+		}
+
 		fmt.Println("\n#: Expired Sectors")
 		if err := sectorsExpiredCmd.Action(cctx); err != nil {
 			fmt.Println("ERROR: ", err)
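`info all` chains the `Action` of other CLI commands against the shared context, printing failures instead of aborting, so one broken section cannot hide the rest of the dump. A small sketch of the repeated pattern above factored into a helper (`runSection` is hypothetical, not in the diff):

```go
package info

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

// runSection runs another command's Action in-process with the shared cli
// context; errors are printed and swallowed so the dump continues.
func runSection(cctx *cli.Context, title string, cmd *cli.Command) {
	fmt.Printf("\n#: %s\n", title)
	if err := cmd.Action(cctx); err != nil {
		fmt.Println("ERROR: ", err)
	}
}
```

With it, the new sections would read `runSection(cctx, "Storage Locks", storageLocks)` and `runSection(cctx, "Storage Sector List", storageListSectorsCmd)`.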
@ -467,12 +467,15 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
 		stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{})
 
 		smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{
-			ParallelFetchLimit: 10,
-			AllowAddPiece:      true,
-			AllowPreCommit1:    true,
-			AllowPreCommit2:    true,
-			AllowCommit:        true,
-			AllowUnseal:        true,
+			ParallelFetchLimit:       10,
+			AllowAddPiece:            true,
+			AllowPreCommit1:          true,
+			AllowPreCommit2:          true,
+			AllowCommit:              true,
+			AllowUnseal:              true,
+			AllowReplicaUpdate:       true,
+			AllowProveReplicaUpdate2: true,
+			AllowRegenSectorKey:      true,
 		}, wsts, smsts)
 		if err != nil {
 			return err
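`storageMinerInit` now opts a freshly initialized miner into the three new SnapDeals task types alongside the classic pipeline. In sketch form, assuming the v1.14-era `extern/sector-storage` package layout, an operator who wants sealing but no snap-deal work on a node would flip the three new fields to false:

```go
package main

import (
	sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
)

// Sealing capabilities for a node that seals but never performs SnapDeals
// replica updates (the field set mirrors the diff above).
var noSnapCfg = sectorstorage.SealerConfig{
	ParallelFetchLimit:       10,
	AllowAddPiece:            true,
	AllowPreCommit1:          true,
	AllowPreCommit2:          true,
	AllowCommit:              true,
	AllowUnseal:              true,
	AllowReplicaUpdate:       false,
	AllowProveReplicaUpdate2: false,
	AllowRegenSectorKey:      false,
}
```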
@ -11,6 +11,9 @@ import (
 	"strings"
 	"time"
 
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
+
 	"github.com/docker/go-units"
 	"github.com/fatih/color"
 	cbor "github.com/ipfs/go-ipld-cbor"
@ -20,6 +23,7 @@ import (
 	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/network"
 	miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
 
 	"github.com/filecoin-project/lotus/api"
@ -50,11 +54,14 @@ var sectorsCmd = &cli.Command{
 		sectorsExtendCmd,
 		sectorsTerminateCmd,
 		sectorsRemoveCmd,
+		sectorsSnapUpCmd,
+		sectorsSnapAbortCmd,
 		sectorsMarkForUpgradeCmd,
 		sectorsStartSealCmd,
 		sectorsSealDelayCmd,
 		sectorsCapacityCollateralCmd,
 		sectorsBatching,
+		sectorsRefreshPieceMatchingCmd,
 	},
 }
 
@ -154,7 +161,7 @@ var sectorsStatusCmd = &cli.Command{
 		fmt.Printf("Expiration:\t\t%v\n", status.Expiration)
 		fmt.Printf("DealWeight:\t\t%v\n", status.DealWeight)
 		fmt.Printf("VerifiedDealWeight:\t\t%v\n", status.VerifiedDealWeight)
-		fmt.Printf("InitialPledge:\t\t%v\n", status.InitialPledge)
+		fmt.Printf("InitialPledge:\t\t%v\n", types.FIL(status.InitialPledge))
 		fmt.Printf("\nExpiration Info\n")
 		fmt.Printf("OnTime:\t\t%v\n", status.OnTime)
 		fmt.Printf("Early:\t\t%v\n", status.Early)
@ -287,8 +294,14 @@ var sectorsListCmd = &cli.Command{
 			Aliases: []string{"e"},
 		},
 		&cli.BoolFlag{
-			Name:    "seal-time",
-			Usage:   "display how long it took for the sector to be sealed",
+			Name:    "initial-pledge",
+			Usage:   "display initial pledge",
+			Aliases: []string{"p"},
+		},
+		&cli.BoolFlag{
+			Name:    "seal-time",
+			Usage:   "display how long it took for the sector to be sealed",
 			Aliases: []string{"t"},
 		},
 		&cli.StringFlag{
 			Name: "states",
@ -398,6 +411,7 @@ var sectorsListCmd = &cli.Command{
 			tablewriter.Col("Deals"),
 			tablewriter.Col("DealWeight"),
 			tablewriter.Col("VerifiedPower"),
+			tablewriter.Col("Pledge"),
 			tablewriter.NewLineCol("Error"),
 			tablewriter.NewLineCol("RecoveryTimeout"))
 
@ -476,6 +490,9 @@ var sectorsListCmd = &cli.Command{
 						m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
 					}
 				}
+				if inSSet && cctx.Bool("initial-pledge") {
+					m["Pledge"] = types.FIL(st.InitialPledge).Short()
+				}
 			}
 
 			if !fast && deals > 0 {
@ -1476,9 +1493,47 @@ var sectorsRemoveCmd = &cli.Command{
 	},
 }
 
-var sectorsMarkForUpgradeCmd = &cli.Command{
-	Name:      "mark-for-upgrade",
-	Usage:     "Mark a committed capacity sector for replacement by a sector with deals",
+var sectorsSnapUpCmd = &cli.Command{
+	Name:      "snap-up",
+	Usage:     "Mark a committed capacity sector to be filled with deals",
 	ArgsUsage: "<sectorNum>",
 	Action: func(cctx *cli.Context) error {
 		if cctx.Args().Len() != 1 {
+			return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
+		}
+
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		api, nCloser, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer nCloser()
+		ctx := lcli.ReqContext(cctx)
+
+		nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
+		if err != nil {
+			return xerrors.Errorf("failed to get network version: %w", err)
+		}
+		if nv < network.Version15 {
+			return xerrors.Errorf("snap deals upgrades enabled in network v15")
+		}
+
+		id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+		if err != nil {
+			return xerrors.Errorf("could not parse sector number: %w", err)
+		}
+
+		return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), true)
+	},
+}
+
+var sectorsSnapAbortCmd = &cli.Command{
+	Name:      "abort-upgrade",
+	Usage:     "Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before",
+	ArgsUsage: "<sectorNum>",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 1 {
@ -1497,7 +1552,58 @@ var sectorsMarkForUpgradeCmd = &cli.Command{
 			return xerrors.Errorf("could not parse sector number: %w", err)
 		}
 
-		return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id))
+		return nodeApi.SectorAbortUpgrade(ctx, abi.SectorNumber(id))
 	},
 }
 
+var sectorsMarkForUpgradeCmd = &cli.Command{
+	Name:      "mark-for-upgrade",
+	Usage:     "Mark a committed capacity sector for replacement by a sector with deals",
+	ArgsUsage: "<sectorNum>",
+	Action: func(cctx *cli.Context) error {
+		if cctx.Args().Len() != 1 {
+			return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
+		}
+
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		api, nCloser, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer nCloser()
+		ctx := lcli.ReqContext(cctx)
+
+		nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
+		if err != nil {
+			return xerrors.Errorf("failed to get network version: %w", err)
+		}
+		if nv >= network.Version15 {
+			return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`")
+		}
+
+		// disable mark for upgrade two days before the ntwk v15 upgrade
+		// TODO: remove the following block in v1.15.1
+		head, err := api.ChainHead(ctx)
+		if err != nil {
+			return xerrors.Errorf("failed to get chain head: %w", err)
+		}
+		twoDays := abi.ChainEpoch(2 * builtin.EpochsInDay)
+		if head.Height() > (build.UpgradeOhSnapHeight - twoDays) {
+			return xerrors.Errorf("OhSnap is coming soon, " +
+				"please use `snap-up` to upgrade your cc sectors after the network v15 upgrade!")
+		}
+
+		id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
+		if err != nil {
+			return xerrors.Errorf("could not parse sector number: %w", err)
+		}
+
+		return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false)
+	},
+}
+
@ -2000,6 +2106,25 @@ var sectorsBatchingPendingPreCommit = &cli.Command{
 	},
 }
 
+var sectorsRefreshPieceMatchingCmd = &cli.Command{
+	Name:  "match-pending-pieces",
+	Usage: "force a refreshed match of pending pieces to open sectors without manually waiting for more deals",
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		if err := nodeApi.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil {
+			return err
+		}
+
+		return nil
+	},
+}
+
 func yesno(b bool) string {
 	if b {
 		return color.GreenString("YES")
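The three commands above split CC-sector upgrades by network version: `snap-up` requires v15 and marks the sector with the new `snap=true` argument of `SectorMarkForUpgrade`; classic `mark-for-upgrade` is refused from v15 onward and, as a safety margin, for the last two days of epochs before the upgrade (at the mainnet 30-second epoch time, `2 * builtin.EpochsInDay` works out to 2 * 2880 = 5760 epochs); `abort-upgrade` reverts an attempted snap upgrade. A condensed sketch of the same gating, assuming the v1.14 API surface (`markForUpgrade` itself is our wrapper, not a lotus function):

```go
package main

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// markForUpgrade gates the two upgrade paths on the network version, then
// calls the miner API with the new snap flag.
func markForUpgrade(ctx context.Context, full api.FullNode, miner api.StorageMiner, num abi.SectorNumber, snap bool) error {
	nv, err := full.StateNetworkVersion(ctx, types.EmptyTSK)
	if err != nil {
		return xerrors.Errorf("failed to get network version: %w", err)
	}
	if snap && nv < network.Version15 {
		return xerrors.Errorf("snap deals upgrades are only enabled from network v15")
	}
	if !snap && nv >= network.Version15 {
		return xerrors.Errorf("classic cc upgrades are disabled from v15 onwards, use snap-up")
	}
	return miner.SectorMarkForUpgrade(ctx, num, snap)
}
```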
@ -368,6 +368,7 @@ type storedSector struct {
 	store stores.SectorStorageInfo
 
 	unsealed, sealed, cache bool
+	update, updatecache     bool
 }
 
 var storageFindCmd = &cli.Command{
@ -421,6 +422,16 @@ var storageFindCmd = &cli.Command{
 			return xerrors.Errorf("finding cache: %w", err)
 		}
 
+		us, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUpdate, 0, false)
+		if err != nil {
+			return xerrors.Errorf("finding sealed: %w", err)
+		}
+
+		uc, err := nodeApi.StorageFindSector(ctx, sid, storiface.FTUpdateCache, 0, false)
+		if err != nil {
+			return xerrors.Errorf("finding cache: %w", err)
+		}
+
 		byId := map[stores.ID]*storedSector{}
 		for _, info := range u {
 			sts, ok := byId[info.ID]
@ -455,6 +466,28 @@ var storageFindCmd = &cli.Command{
 			}
 			sts.cache = true
 		}
+		for _, info := range us {
+			sts, ok := byId[info.ID]
+			if !ok {
+				sts = &storedSector{
+					id:    info.ID,
+					store: info,
+				}
+				byId[info.ID] = sts
+			}
+			sts.update = true
+		}
+		for _, info := range uc {
+			sts, ok := byId[info.ID]
+			if !ok {
+				sts = &storedSector{
+					id:    info.ID,
+					store: info,
+				}
+				byId[info.ID] = sts
+			}
+			sts.updatecache = true
+		}
 
 		local, err := nodeApi.StorageLocal(ctx)
 		if err != nil {
@ -480,6 +513,12 @@ var storageFindCmd = &cli.Command{
 			if info.cache {
 				types += "Cache, "
 			}
+			if info.update {
+				types += "Update, "
+			}
+			if info.updatecache {
+				types += "UpdateCache, "
+			}
 
 			fmt.Printf("In %s (%s)\n", info.id, types[:len(types)-2])
 			fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore)
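`storage find` now also looks for the two new SnapDeals file types, `FTUpdate` and `FTUpdateCache`, and merges the results into the same per-store map. (The `finding sealed` message in the `FTUpdate` branch above reads like a copy-paste slip; `finding update` would be clearer.) The five merge loops are structurally identical; a sketch of the shared shape as one helper (`markStored` is a hypothetical refactor, not code from the diff):

```go
package main

import (
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
)

// storedSector mirrors the struct in the diff above.
type storedSector struct {
	id    stores.ID
	store stores.SectorStorageInfo

	unsealed, sealed, cache bool
	update, updatecache     bool
}

// markStored folds one StorageFindSector result set into the byId map,
// flipping the caller-chosen flag on every store that holds the file type.
func markStored(byId map[stores.ID]*storedSector, infos []stores.SectorStorageInfo, set func(*storedSector)) {
	for _, info := range infos {
		sts, ok := byId[info.ID]
		if !ok {
			sts = &storedSector{id: info.ID, store: info}
			byId[info.ID] = sts
		}
		set(sts)
	}
}
```

Usage would read `markStored(byId, us, func(s *storedSector) { s.update = true })`, and likewise for `uc` with `updatecache`.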
@ -163,6 +163,21 @@ var runCmd = &cli.Command{
 			Usage: "enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap)",
 			Value: true,
 		},
+		&cli.BoolFlag{
+			Name:  "replica-update",
+			Usage: "enable replica update",
+			Value: true,
+		},
+		&cli.BoolFlag{
+			Name:  "prove-replica-update2",
+			Usage: "enable prove replica update 2",
+			Value: true,
+		},
+		&cli.BoolFlag{
+			Name:  "regen-sector-key",
+			Usage: "enable regen sector key",
+			Value: true,
+		},
 		&cli.IntFlag{
 			Name:  "parallel-fetch-limit",
 			Usage: "maximum fetch operations to run in parallel",
@ -251,7 +266,7 @@ var runCmd = &cli.Command{
 
 		var taskTypes []sealtasks.TaskType
 
-		taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize)
+		taskTypes = append(taskTypes, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate)
 
 		if cctx.Bool("addpiece") {
 			taskTypes = append(taskTypes, sealtasks.TTAddPiece)
@ -268,6 +283,15 @@ var runCmd = &cli.Command{
 		if cctx.Bool("commit") {
 			taskTypes = append(taskTypes, sealtasks.TTCommit2)
 		}
+		if cctx.Bool("replica-update") {
+			taskTypes = append(taskTypes, sealtasks.TTReplicaUpdate)
+		}
+		if cctx.Bool("prove-replica-update2") {
+			taskTypes = append(taskTypes, sealtasks.TTProveReplicaUpdate2)
+		}
+		if cctx.Bool("regen-sector-key") {
+			taskTypes = append(taskTypes, sealtasks.TTRegenSectorKey)
+		}
 
 		if len(taskTypes) == 0 {
 			return xerrors.Errorf("no task types specified")
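The new worker capabilities follow the established opt-out pattern: each is a boolean flag defaulting to true, so existing `lotus-worker run` invocations pick the SnapDeals tasks up automatically, and `lotus-worker run --replica-update=false` turns the new work off again. A table-driven sketch of the flag-to-task mapping (the table itself is a refactoring sketch; flag names beyond those visible in the diff are assumed from the surrounding file):

```go
package main

import (
	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
	"github.com/urfave/cli/v2"
)

// taskFlags pairs each opt-out CLI flag with the task type it enables.
var taskFlags = []struct {
	flag string
	task sealtasks.TaskType
}{
	{"addpiece", sealtasks.TTAddPiece},
	{"precommit1", sealtasks.TTPreCommit1},
	{"precommit2", sealtasks.TTPreCommit2},
	{"commit", sealtasks.TTCommit2},
	{"unseal", sealtasks.TTUnseal},
	{"replica-update", sealtasks.TTReplicaUpdate},
	{"prove-replica-update2", sealtasks.TTProveReplicaUpdate2},
	{"regen-sector-key", sealtasks.TTRegenSectorKey},
}

// enabledTasks rebuilds the taskTypes slice from the flag table; the base
// set (fetch, commit1, prove-replica-update1, both finalize variants) is
// always on, matching the diff above.
func enabledTasks(cctx *cli.Context) []sealtasks.TaskType {
	tasks := []sealtasks.TaskType{
		sealtasks.TTFetch, sealtasks.TTCommit1,
		sealtasks.TTProveReplicaUpdate1,
		sealtasks.TTFinalize, sealtasks.TTFinalizeReplicaUpdate,
	}
	for _, tf := range taskFlags {
		if cctx.Bool(tf.flag) {
			tasks = append(tasks, tf.task)
		}
	}
	return tasks
}
```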
@ -22,11 +22,14 @@ var tasksCmd = &cli.Command{
 	},
 }
 
 var allowSetting = map[sealtasks.TaskType]struct{}{
-	sealtasks.TTAddPiece:   {},
-	sealtasks.TTPreCommit1: {},
-	sealtasks.TTPreCommit2: {},
-	sealtasks.TTCommit2:    {},
-	sealtasks.TTUnseal:     {},
+	sealtasks.TTAddPiece:            {},
+	sealtasks.TTPreCommit1:          {},
+	sealtasks.TTPreCommit2:          {},
+	sealtasks.TTCommit2:             {},
+	sealtasks.TTUnseal:              {},
+	sealtasks.TTReplicaUpdate:       {},
+	sealtasks.TTProveReplicaUpdate2: {},
+	sealtasks.TTRegenSectorKey:      {},
 }
 
 var settableStr = func() string {
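`allowSetting` is the allow-list consulted by the worker's task enable/disable commands: only task types present in the map can be toggled at runtime, and the three snap task types are now settable. Since `sealtasks.TaskType` is a string type, the cut-off `settableStr` above presumably renders the map keys for help text, roughly like this sketch (`settableList` is our stand-in name):

```go
package main

import (
	"sort"
	"strings"

	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
)

// settableList renders the allow-list as a stable, comma-separated string
// suitable for CLI usage text.
func settableList(allow map[sealtasks.TaskType]struct{}) string {
	names := make([]string, 0, len(allow))
	for tt := range allow {
		names = append(names, string(tt))
	}
	sort.Strings(names)
	return strings.Join(names, ", ")
}
```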
@ -508,12 +508,19 @@ var genesisSetRemainderCmd = &cli.Command{
 }
 
 var genesisSetActorVersionCmd = &cli.Command{
-	Name:      "set-network-version",
-	Usage:     "Set the version that this network will start from",
-	ArgsUsage: "<genesisFile> <actorVersion>",
+	Name:  "set-network-version",
+	Usage: "Set the version that this network will start from",
+	Flags: []cli.Flag{
+		&cli.IntFlag{
+			Name:  "network-version",
+			Usage: "network version to start genesis with",
+			Value: int(build.GenesisNetworkVersion),
+		},
+	},
+	ArgsUsage: "<genesisFile>",
 	Action: func(cctx *cli.Context) error {
-		if cctx.Args().Len() != 2 {
-			return fmt.Errorf("must specify genesis file and network version (e.g. '0'")
+		if cctx.Args().Len() != 1 {
+			return fmt.Errorf("must specify genesis file")
 		}
 
 		genf, err := homedir.Expand(cctx.Args().First())
@ -531,16 +538,12 @@ var genesisSetActorVersionCmd = &cli.Command{
 			return xerrors.Errorf("unmarshal genesis template: %w", err)
 		}
 
-		nv, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
-		if err != nil {
-			return xerrors.Errorf("parsing network version: %w", err)
-		}
-
-		if nv > uint64(build.NewestNetworkVersion) {
+		nv := network.Version(cctx.Int("network-version"))
+		if nv > build.NewestNetworkVersion {
 			return xerrors.Errorf("invalid network version: %d", nv)
 		}
 
-		template.NetworkVersion = network.Version(nv)
+		template.NetworkVersion = nv
 
 		b, err = json.MarshalIndent(&template, "", "  ")
 		if err != nil {
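`set-network-version` previously took the version as a second positional argument; it is now an `IntFlag` defaulting to `build.GenesisNetworkVersion`, so a plain `lotus-seed genesis set-network-version <genesisFile>` does the right thing. A self-contained sketch of the flag-with-default pattern (the literal `14` stands in for `build.GenesisNetworkVersion`):

```go
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "genesis-demo",
		Flags: []cli.Flag{
			&cli.IntFlag{
				Name:  "network-version",
				Usage: "network version to start genesis with",
				Value: 14, // stand-in for build.GenesisNetworkVersion
			},
		},
		Action: func(cctx *cli.Context) error {
			// cctx.Int falls back to the flag's Value when the flag is unset.
			fmt.Println("starting genesis at network version:", cctx.Int("network-version"))
			return nil
		},
	}
	// Without --network-version this prints 14; with it, the given value.
	if err := app.Run([]string{"genesis-demo", "--network-version=15"}); err != nil {
		fmt.Println("ERROR:", err)
	}
}
```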
Some files were not shown because too many files have changed in this diff.