diff --git a/.circleci/config.yml b/.circleci/config.yml index 6fc6690f5..21b894f1f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -44,13 +44,13 @@ commands: - restore_cache: name: Restore parameters cache keys: - - 'v25-2k-lotus-params' + - 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache - key: 'v25-2k-lotus-params' + key: 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ install_ipfs: @@ -855,6 +855,11 @@ workflows: suite: itest-get_messages_in_ts target: "./itests/get_messages_in_ts_test.go" + - test: + name: test-itest-mempool + suite: itest-mempool + target: "./itests/mempool_test.go" + - test: name: test-itest-multisig suite: itest-multisig diff --git a/.circleci/template.yml b/.circleci/template.yml index ef6818c6d..8f5995d56 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -44,13 +44,13 @@ commands: - restore_cache: name: Restore parameters cache keys: - - 'v25-2k-lotus-params' + - 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ - run: ./lotus fetch-params 2048 - save_cache: name: Save parameters cache - key: 'v25-2k-lotus-params' + key: 'v26-2k-lotus-params' paths: - /var/tmp/filecoin-proof-parameters/ install_ipfs: diff --git a/CHANGELOG.md b/CHANGELOG.md index a420421de..e4fc4c67b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,106 @@ # Lotus changelog +# 1.14.2 / 2022-02-24 + +This is an **optional** release of lotus, that's had a couple more improvements w.r.t Snap experience for storage providers in preparation of the[upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). + +Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s). + +Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out! + +## Bug Fixes +- fix lotus-bench for sealing jobs (#8173) +- fix:sealing:really-do-it flag for abort upgrade (#8181) +- fix:proving:post check sector handles snap deals replica faults (#8177) +- fix: sealing: missing file type (#8180) + +## Others +- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong + commit and fixed that by a force push, in which is a really bad practise since it messes up the go mod. Therefore, + we want to retract it and users may use v1.14.1&^. + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @zenground0 | 2 | +73/-58 | 12 | +| @eben.xie | 1 | +7/-0 | 1 | +| @jennijuju | 1 | +4/-0 | 1 | +| @jennijuju | 1 | +2/-1 | 1 | +| @ribasushi | 1 | +2/-0 | 1 | + +# 1.14.1 / 2022-02-18 + +This is an **optional** release of lotus, that fixes the incorrect *comment* of network v15 OhSnap upgrade **date**. Note the actual upgrade epoch in [v1.14.0](https://github.com/filecoin-project/lotus/releases/tag/v1.14.0) was correct. + +# 1.14.0 / 2022-02-17 + +This is a MANDATORY release of Lotus that introduces [Filecoin network v15, +codenamed the OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). + +The network is scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. 
All node operators, including storage providers, must upgrade to this release (or a later release) before that time. Storage providers must update their daemons, miners, and worker(s). + +The OhSnap upgrade introduces the following FIPs, delivered in [actors v7](https://github.com/filecoin-project/specs-actors/releases/tag/v7.0.0): +- [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md) +- [FIP-0028 Remove Datacap from Verified clients](https://github.com/filecoin-project/FIPs/pull/226) + +It is recommended that storage providers download the new params before updating their node, miner, and workers. To do so: + +- Download Lotus v1.14.0 or later +- run `make lotus-shed` +- run `./lotus-shed fetch-params` with the appropriate `proving-params` flag +- Upgrade the Lotus daemon and miner **when the previous step is complete** + +All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries. + +## New Features and Changes +- Integrate actor v7-rc1: + - Integrate v7 actors ([#7617](https://github.com/filecoin-project/lotus/pull/7617)) + - feat: state: Fast migration for v15 ([#7933](https://github.com/filecoin-project/lotus/pull/7933)) + - fix: blockstore: Add missing locks to autobatch::Get() [#7939](https://github.com/filecoin-project/lotus/pull/7939)) + - correctness fixes for the autobatch blockstore ([#7940](https://github.com/filecoin-project/lotus/pull/7940)) +- Implement and support [FIP-0019 Snap Deals](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0019.md) + - chore: deps: Integrate proof v11.0.0 ([#7923](https://github.com/filecoin-project/lotus/pull/7923)) + - Snap Deals Lotus Integration: FSM Posting and integration test ([#7810](https://github.com/filecoin-project/lotus/pull/7810)) + - Feat/sector storage unseal ([#7730](https://github.com/filecoin-project/lotus/pull/7730)) + - Feat/snap deals storage ([#7615](https://github.com/filecoin-project/lotus/pull/7615)) + - fix: sealing: Add more deal expiration checks during PRU pipeline ([#7871](https://github.com/filecoin-project/lotus/pull/7871)) + - chore: deps: Update go-paramfetch ([#7917](https://github.com/filecoin-project/lotus/pull/7917)) + - feat: #7880 gas: add gas charge for VerifyReplicaUpdate ([#7897](https://github.com/filecoin-project/lotus/pull/7897)) + - enhancement: sectors: disable existing cc upgrade path 2 days before the upgrade epoch ([#7900](https://github.com/filecoin-project/lotus/pull/7900)) + +## Improvements +- updating to new datastore/blockstore code with contexts ([#7646](https://github.com/filecoin-project/lotus/pull/7646)) +- reorder transfer checks so as to ensure sending 2B FIL to yourself fails if you don't have that amount ([#7637](https://github.com/filecoin-project/lotus/pull/7637)) +- VM: Circ supply should be constant per epoch ([#7811](https://github.com/filecoin-project/lotus/pull/7811)) + +## Bug Fixes +- Fix: state: circsuypply calc around null blocks ([#7890](https://github.com/filecoin-project/lotus/pull/7890)) +- Mempool msg selection should respect block message limits ([#7321](https://github.com/filecoin-project/lotus/pull/7321)) + SplitStore: supress compaction near upgrades 
([#7734](https://github.com/filecoin-project/lotus/pull/7734)) + +## Others +- chore: create pull_request_template.md ([#7726](https://github.com/filecoin-project/lotus/pull/7726)) + +## Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aayush Rajasekaran | 41 | +5538/-1205 | 189 | +| zenground0 | 11 | +3316/-524 | 124 | +| Jennifer Wang | 29 | +714/-599 | 68 | +| ZenGround0 | 3 | +263/-25 | 11 | +| c r | 2 | +198/-30 | 6 | +| vyzo | 4 | +189/-7 | 7 | +| Aayush | 11 | +146/-48 | 49 | +| web3-bot | 10 | +99/-17 | 10 | +| Steven Allen | 1 | +55/-37 | 1 | +| Jiaying Wang | 5 | +30/-8 | 5 | +| Jakub Sztandera | 2 | +8/-3 | 3 | +| Łukasz Magiera | 1 | +3/-3 | 2 | +| Travis Person | 1 | +2/-2 | 2 | +| Rod Vagg | 1 | +2/-2 | 2 | + # v1.13.2 / 2022-01-09 Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like diff --git a/Makefile b/Makefile index f7b13cc18..f91e74e33 100644 --- a/Makefile +++ b/Makefile @@ -345,6 +345,8 @@ gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli" .PHONY: gen +jen: gen + snap: lotus lotus-miner lotus-worker snapcraft # snapcraft upload ./lotus_*.snap diff --git a/api/api_gateway.go b/api/api_gateway.go index fbe2e0cd6..be4b3b83c 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -45,8 +45,9 @@ type Gateway interface { GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) - MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) + MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) + MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) diff --git a/api/api_storage.go b/api/api_storage.go index bccb7baf9..dc7003cfe 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -113,6 +113,8 @@ type StorageMiner interface { // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin + // SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort it + SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error //perm:admin retry:true @@ -130,6 +132,7 @@ type StorageMiner interface { ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err 
*storiface.CallError) error //perm:admin retry:true ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true @@ -263,7 +266,7 @@ type StorageMiner interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin - CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin + CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read } diff --git a/api/api_worker.go b/api/api_worker.go index 68d8e7baf..ba50a9459 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -39,6 +39,7 @@ type Worker interface { SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin + FinalizeReplicaUpdate(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin diff --git a/api/proxy_gen.go b/api/proxy_gen.go index d0d475709..756f38df9 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -516,6 +516,8 @@ type GatewayStruct struct { MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `` + MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `` + StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `` StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) `` @@ -631,7 +633,7 @@ type StorageMinerStruct struct { ActorSectorSize func(p0 
context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"` - CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` + CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"` @@ -735,6 +737,8 @@ type StorageMinerStruct struct { ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` @@ -767,6 +771,8 @@ type StorageMinerStruct struct { SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` + SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` @@ -884,6 +890,8 @@ type WorkerStruct struct { Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"` + FinalizeReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"` @@ -3291,6 +3299,17 @@ func (s *GatewayStub) MsigGetVested(p0 context.Context, p1 address.Address, p2 t return *new(types.BigInt), ErrNotSupported } +func (s *GatewayStruct) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { + if s.Internal.MsigGetVestingSchedule == nil { + return *new(MsigVesting), ErrNotSupported + } + return s.Internal.MsigGetVestingSchedule(p0, p1, p2) +} + +func (s *GatewayStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) { + return *new(MsigVesting), ErrNotSupported +} + func (s *GatewayStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { if s.Internal.StateAccountKey == nil { return *new(address.Address), ErrNotSupported @@ -3786,14 +3805,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres return *new(abi.SectorSize), ErrNotSupported } -func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { +func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { if s.Internal.CheckProvable == nil { return 
*new(map[abi.SectorNumber]string), ErrNotSupported } - return s.Internal.CheckProvable(p0, p1, p2, p3) + return s.Internal.CheckProvable(p0, p1, p2, p3, p4) } -func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) { +func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) { return *new(map[abi.SectorNumber]string), ErrNotSupported } @@ -4358,6 +4377,17 @@ func (s *StorageMinerStub) ReturnFetch(p0 context.Context, p1 storiface.CallID, return ErrNotSupported } +func (s *StorageMinerStruct) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnFinalizeReplicaUpdate == nil { + return ErrNotSupported + } + return s.Internal.ReturnFinalizeReplicaUpdate(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnFinalizeReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnFinalizeSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { if s.Internal.ReturnFinalizeSector == nil { return ErrNotSupported @@ -4534,6 +4564,17 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf return nil, ErrNotSupported } +func (s *StorageMinerStruct) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error { + if s.Internal.SectorAbortUpgrade == nil { + return ErrNotSupported + } + return s.Internal.SectorAbortUpgrade(p0, p1) +} + +func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorNumber) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data, p3 PieceDealInfo) (SectorOffset, error) { if s.Internal.SectorAddPieceToAny == nil { return *new(SectorOffset), ErrNotSupported @@ -5084,6 +5125,17 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storage.SectorRef, p2 storifac return *new(storiface.CallID), ErrNotSupported } +func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + if s.Internal.FinalizeReplicaUpdate == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.FinalizeReplicaUpdate(p0, p1, p2) +} + +func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { if s.Internal.FinalizeSector == nil { return *new(storiface.CallID), ErrNotSupported diff --git a/api/version.go b/api/version.go index 228dcbd10..9f4f73513 100644 --- a/api/version.go +++ b/api/version.go @@ -57,7 +57,7 @@ var ( FullAPIVersion0 = newVer(1, 5, 0) FullAPIVersion1 = newVer(2, 2, 0) - MinerAPIVersion0 = newVer(1, 3, 0) + MinerAPIVersion0 = newVer(1, 4, 0) WorkerAPIVersion0 = newVer(1, 5, 0) ) diff --git a/blockstore/context.go b/blockstore/context.go new file mode 100644 index 000000000..ebb6fafe3 --- /dev/null +++ b/blockstore/context.go @@ -0,0 +1,21 @@ +package blockstore + +import ( + "context" +) + +type hotViewKey struct{} + +var hotView = hotViewKey{} + +// WithHotView constructs a new context with an option that provides a 
hint to the blockstore +// (e.g. the splitstore) that the object (and its ipld references) should be kept hot. +func WithHotView(ctx context.Context) context.Context { + return context.WithValue(ctx, hotView, struct{}{}) +} + +// IsHotView returns true if the hot view option is set in the context +func IsHotView(ctx context.Context) bool { + v := ctx.Value(hotView) + return v != nil +} diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md index f69a056ca..1490004cf 100644 --- a/blockstore/splitstore/README.md +++ b/blockstore/splitstore/README.md @@ -49,10 +49,11 @@ These are options in the `[Chainstore.Splitstore]` section of the configuration: blockstore and discards writes; this is necessary to support syncing from a snapshot. - `MarkSetType` -- specifies the type of markset to use during compaction. The markset is the data structure used by compaction/gc to track live objects. - The default value is `"map"`, which will use an in-memory map; if you are limited - in memory (or indeed see compaction run out of memory), you can also specify - `"badger"` which will use an disk backed markset, using badger. This will use - much less memory, but will also make compaction slower. + The default value is "badger", which will use a disk backed markset using badger. + If you have a lot of memory (48G or more) you can also use "map", which will use + an in memory markset, speeding up compaction at the cost of higher memory usage. + Note: If you are using a VPS with a network volume, you need to provision at least + 3000 IOPs with the badger markset. - `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4 finalities maintained by default, to maintain messages and message receipts in the hotstore. This is useful for assistive nodes that want to support syncing for other @@ -105,6 +106,12 @@ Compaction works transactionally with the following algorithm: - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live - We then end the transaction and compact/gc the hotstore. +As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been +modified to eliminate sorting and maintain the cold object set on disk. This drastically reduces +memory usage; in fact, when using badger as the markset compaction uses very little memory, and +it should be now possible to run splitstore with 32GB of RAM or less without danger of running out of +memory during compaction. 
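To make the hot-view plumbing introduced in `blockstore/context.go` (above) concrete, here is a minimal, illustrative Go sketch of how a caller might opt a read into the hot view so that coldstore hits are scheduled for reification back into the hotstore. The `blockstore.WithHotView` helper and the splitstore's `IsHotView` check come from this patch, and the context-taking `Get` reflects the blockstore context changes in this release; the package name, function name, and variable names below are assumptions for illustration only, not part of the patch.

```go
// Illustrative sketch only: names (hotviewexample, readHot, bs, c) are hypothetical.
package hotviewexample

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore"
)

// readHot fetches a block under a "hot view" context. If the splitstore has to
// fall back to the coldstore for this cid, its IsHotView check will schedule the
// object (and its IPLD references) for reification into the hotstore.
func readHot(ctx context.Context, bs blockstore.Blockstore, c cid.Cid) (blocks.Block, error) {
	return bs.Get(blockstore.WithHotView(ctx), c)
}
```

Passing the hint through the context keeps the blockstore interface unchanged while still letting the splitstore distinguish reads that should keep an object (and its references) hot from incidental cold reads.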
+ ## Garbage Collection TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577) diff --git a/blockstore/splitstore/checkpoint.go b/blockstore/splitstore/checkpoint.go new file mode 100644 index 000000000..d3cd4cba7 --- /dev/null +++ b/blockstore/splitstore/checkpoint.go @@ -0,0 +1,118 @@ +package splitstore + +import ( + "bufio" + "io" + "os" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +type Checkpoint struct { + file *os.File + buf *bufio.Writer +} + +func NewCheckpoint(path string) (*Checkpoint, error) { + file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_SYNC, 0644) + if err != nil { + return nil, xerrors.Errorf("error creating checkpoint: %w", err) + } + buf := bufio.NewWriter(file) + + return &Checkpoint{ + file: file, + buf: buf, + }, nil +} + +func OpenCheckpoint(path string) (*Checkpoint, cid.Cid, error) { + filein, err := os.Open(path) + if err != nil { + return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for reading: %w", err) + } + defer filein.Close() //nolint:errcheck + + bufin := bufio.NewReader(filein) + start, err := readRawCid(bufin, nil) + if err != nil && err != io.EOF { + return nil, cid.Undef, xerrors.Errorf("error reading cid from checkpoint: %w", err) + } + + fileout, err := os.OpenFile(path, os.O_WRONLY|os.O_SYNC, 0644) + if err != nil { + return nil, cid.Undef, xerrors.Errorf("error opening checkpoint for writing: %w", err) + } + bufout := bufio.NewWriter(fileout) + + return &Checkpoint{ + file: fileout, + buf: bufout, + }, start, nil +} + +func (cp *Checkpoint) Set(c cid.Cid) error { + if _, err := cp.file.Seek(0, io.SeekStart); err != nil { + return xerrors.Errorf("error seeking beginning of checkpoint: %w", err) + } + + if err := writeRawCid(cp.buf, c, true); err != nil { + return xerrors.Errorf("error writing cid to checkpoint: %w", err) + } + + return nil +} + +func (cp *Checkpoint) Close() error { + if cp.file == nil { + return nil + } + + err := cp.file.Close() + cp.file = nil + cp.buf = nil + + return err +} + +func readRawCid(buf *bufio.Reader, hbuf []byte) (cid.Cid, error) { + sz, err := buf.ReadByte() + if err != nil { + return cid.Undef, err // don't wrap EOF as it is not an error here + } + + if hbuf == nil { + hbuf = make([]byte, int(sz)) + } else { + hbuf = hbuf[:int(sz)] + } + + if _, err := io.ReadFull(buf, hbuf); err != nil { + return cid.Undef, xerrors.Errorf("error reading hash: %w", err) // wrap EOF, it's corrupt + } + + hash, err := mh.Cast(hbuf) + if err != nil { + return cid.Undef, xerrors.Errorf("error casting multihash: %w", err) + } + + return cid.NewCidV1(cid.Raw, hash), nil +} + +func writeRawCid(buf *bufio.Writer, c cid.Cid, flush bool) error { + hash := c.Hash() + if err := buf.WriteByte(byte(len(hash))); err != nil { + return err + } + if _, err := buf.Write(hash); err != nil { + return err + } + if flush { + return buf.Flush() + } + + return nil +} diff --git a/blockstore/splitstore/checkpoint_test.go b/blockstore/splitstore/checkpoint_test.go new file mode 100644 index 000000000..4fefe40cf --- /dev/null +++ b/blockstore/splitstore/checkpoint_test.go @@ -0,0 +1,147 @@ +package splitstore + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +func TestCheckpoint(t *testing.T) { + dir, err := ioutil.TempDir("", "checkpoint.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(dir) + }) + + path := 
filepath.Join(dir, "checkpoint") + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + cp, err := NewCheckpoint(path) + if err != nil { + t.Fatal(err) + } + + if err := cp.Set(k1); err != nil { + t.Fatal(err) + } + if err := cp.Set(k2); err != nil { + t.Fatal(err) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + cp, start, err := OpenCheckpoint(path) + if err != nil { + t.Fatal(err) + } + if !start.Equals(k2) { + t.Fatalf("expected start to be %s; got %s", k2, start) + } + + if err := cp.Set(k3); err != nil { + t.Fatal(err) + } + if err := cp.Set(k4); err != nil { + t.Fatal(err) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + cp, start, err = OpenCheckpoint(path) + if err != nil { + t.Fatal(err) + } + if !start.Equals(k4) { + t.Fatalf("expected start to be %s; got %s", k4, start) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + // also test correct operation with an empty checkpoint + cp, err = NewCheckpoint(path) + if err != nil { + t.Fatal(err) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + cp, start, err = OpenCheckpoint(path) + if err != nil { + t.Fatal(err) + } + + if start.Defined() { + t.Fatal("expected start to be undefined") + } + + if err := cp.Set(k1); err != nil { + t.Fatal(err) + } + if err := cp.Set(k2); err != nil { + t.Fatal(err) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + cp, start, err = OpenCheckpoint(path) + if err != nil { + t.Fatal(err) + } + if !start.Equals(k2) { + t.Fatalf("expected start to be %s; got %s", k2, start) + } + + if err := cp.Set(k3); err != nil { + t.Fatal(err) + } + if err := cp.Set(k4); err != nil { + t.Fatal(err) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + + cp, start, err = OpenCheckpoint(path) + if err != nil { + t.Fatal(err) + } + if !start.Equals(k4) { + t.Fatalf("expected start to be %s; got %s", k4, start) + } + + if err := cp.Close(); err != nil { + t.Fatal(err) + } + +} diff --git a/blockstore/splitstore/coldset.go b/blockstore/splitstore/coldset.go new file mode 100644 index 000000000..129e2ed92 --- /dev/null +++ b/blockstore/splitstore/coldset.go @@ -0,0 +1,102 @@ +package splitstore + +import ( + "bufio" + "io" + "os" + + "golang.org/x/xerrors" + + cid "github.com/ipfs/go-cid" +) + +type ColdSetWriter struct { + file *os.File + buf *bufio.Writer +} + +type ColdSetReader struct { + file *os.File + buf *bufio.Reader +} + +func NewColdSetWriter(path string) (*ColdSetWriter, error) { + file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return nil, xerrors.Errorf("error creating coldset: %w", err) + } + buf := bufio.NewWriter(file) + + return &ColdSetWriter{ + file: file, + buf: buf, + }, nil +} + +func NewColdSetReader(path string) (*ColdSetReader, error) { + file, err := os.Open(path) + if err != nil { + return nil, xerrors.Errorf("error opening coldset: %w", err) + } + buf := bufio.NewReader(file) + + return &ColdSetReader{ + file: file, + buf: buf, + }, nil +} + +func (s *ColdSetWriter) Write(c cid.Cid) error { + return writeRawCid(s.buf, c, false) +} + +func (s *ColdSetWriter) Close() error { + if s.file == nil { + return nil + } + + err1 := s.buf.Flush() + err2 := s.file.Close() + s.buf = nil + s.file = nil + + if err1 != nil { + return err1 + } + return err2 +} + +func 
(s *ColdSetReader) ForEach(f func(cid.Cid) error) error { + hbuf := make([]byte, 256) + for { + next, err := readRawCid(s.buf, hbuf) + if err != nil { + if err == io.EOF { + return nil + } + + return xerrors.Errorf("error reading coldset: %w", err) + } + + if err := f(next); err != nil { + return err + } + } +} + +func (s *ColdSetReader) Reset() error { + _, err := s.file.Seek(0, io.SeekStart) + return err +} + +func (s *ColdSetReader) Close() error { + if s.file == nil { + return nil + } + + err := s.file.Close() + s.file = nil + s.buf = nil + + return err +} diff --git a/blockstore/splitstore/coldset_test.go b/blockstore/splitstore/coldset_test.go new file mode 100644 index 000000000..60216ebd4 --- /dev/null +++ b/blockstore/splitstore/coldset_test.go @@ -0,0 +1,99 @@ +package splitstore + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" +) + +func TestColdSet(t *testing.T) { + dir, err := ioutil.TempDir("", "coldset.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(dir) + }) + + path := filepath.Join(dir, "coldset") + + makeCid := func(i int) cid.Cid { + h, err := multihash.Sum([]byte(fmt.Sprintf("cid.%d", i)), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + const count = 1000 + cids := make([]cid.Cid, 0, count) + for i := 0; i < count; i++ { + cids = append(cids, makeCid(i)) + } + + cw, err := NewColdSetWriter(path) + if err != nil { + t.Fatal(err) + } + + for _, c := range cids { + if err := cw.Write(c); err != nil { + t.Fatal(err) + } + } + + if err := cw.Close(); err != nil { + t.Fatal(err) + } + + cr, err := NewColdSetReader(path) + if err != nil { + t.Fatal(err) + } + + index := 0 + err = cr.ForEach(func(c cid.Cid) error { + if index >= count { + t.Fatal("too many cids") + } + + if !c.Equals(cids[index]) { + t.Fatalf("wrong cid %d; expected %s but got %s", index, cids[index], c) + } + + index++ + return nil + }) + if err != nil { + t.Fatal(err) + } + + if err := cr.Reset(); err != nil { + t.Fatal(err) + } + + index = 0 + err = cr.ForEach(func(c cid.Cid) error { + if index >= count { + t.Fatal("too many cids") + } + + if !c.Equals(cids[index]) { + t.Fatalf("wrong cid; expected %s but got %s", cids[index], c) + } + + index++ + return nil + }) + if err != nil { + t.Fatal(err) + } + +} diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go index f173be575..e67494538 100644 --- a/blockstore/splitstore/markset.go +++ b/blockstore/splitstore/markset.go @@ -14,15 +14,24 @@ var errMarkSetClosed = errors.New("markset closed") type MarkSet interface { ObjectVisitor Mark(cid.Cid) error + MarkMany([]cid.Cid) error Has(cid.Cid) (bool, error) Close() error + + // BeginCriticalSection ensures that the markset is persisted to disk for recovery in case + // of abnormal termination during the critical section span. + BeginCriticalSection() error + // EndCriticalSection ends the critical section span. + EndCriticalSection() } type MarkSetEnv interface { - // Create creates a new markset within the environment. - // name is a unique name for this markset, mapped to the filesystem in disk-backed environments + // New creates a new markset within the environment. + // name is a unique name for this markset, mapped to the filesystem for on-disk persistence. 
// sizeHint is a hint about the expected size of the markset - Create(name string, sizeHint int64) (MarkSet, error) + New(name string, sizeHint int64) (MarkSet, error) + // Recover recovers an existing markset persisted on-disk. + Recover(name string) (MarkSet, error) // Close closes the markset Close() error } @@ -30,7 +39,7 @@ type MarkSetEnv interface { func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { switch mtype { case "map": - return NewMapMarkSetEnv() + return NewMapMarkSetEnv(path) case "badger": return NewBadgerMarkSetEnv(path) default: diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go index e30334b89..659d3b5dd 100644 --- a/blockstore/splitstore/markset_badger.go +++ b/blockstore/splitstore/markset_badger.go @@ -3,6 +3,7 @@ package splitstore import ( "os" "path/filepath" + "runtime" "sync" "golang.org/x/xerrors" @@ -28,6 +29,7 @@ type BadgerMarkSet struct { writers int seqno int version int + persist bool db *badger.DB path string @@ -47,11 +49,10 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) { return &BadgerMarkSetEnv{path: msPath}, nil } -func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { - name += ".tmp" +func (e *BadgerMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) { path := filepath.Join(e.path, name) - db, err := openTransientBadgerDB(path) + db, err := openBadgerDB(path, false) if err != nil { return nil, xerrors.Errorf("error creating badger db: %w", err) } @@ -67,8 +68,72 @@ func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) return ms, nil } +func (e *BadgerMarkSetEnv) Recover(name string) (MarkSet, error) { + path := filepath.Join(e.path, name) + + if _, err := os.Stat(path); err != nil { + return nil, xerrors.Errorf("error stating badger db path: %w", err) + } + + db, err := openBadgerDB(path, true) + if err != nil { + return nil, xerrors.Errorf("error creating badger db: %w", err) + } + + ms := &BadgerMarkSet{ + pend: make(map[string]struct{}), + writing: make(map[int]map[string]struct{}), + db: db, + path: path, + persist: true, + } + ms.cond.L = &ms.mx + + return ms, nil +} + func (e *BadgerMarkSetEnv) Close() error { - return os.RemoveAll(e.path) + return nil +} + +func (s *BadgerMarkSet) BeginCriticalSection() error { + s.mx.Lock() + + if s.persist { + s.mx.Unlock() + return nil + } + + var write bool + var seqno int + if len(s.pend) > 0 { + write = true + seqno = s.nextBatch() + } + + s.persist = true + s.mx.Unlock() + + if write { + // all writes sync once perist is true + return s.write(seqno) + } + + // wait for any pending writes and sync + s.mx.Lock() + for s.writers > 0 { + s.cond.Wait() + } + s.mx.Unlock() + + return s.db.Sync() +} + +func (s *BadgerMarkSet) EndCriticalSection() { + s.mx.Lock() + defer s.mx.Unlock() + + s.persist = false } func (s *BadgerMarkSet) Mark(c cid.Cid) error { @@ -88,6 +153,23 @@ func (s *BadgerMarkSet) Mark(c cid.Cid) error { return nil } +func (s *BadgerMarkSet) MarkMany(batch []cid.Cid) error { + s.mx.Lock() + if s.pend == nil { + s.mx.Unlock() + return errMarkSetClosed + } + + write, seqno := s.putMany(batch) + s.mx.Unlock() + + if write { + return s.write(seqno) + } + + return nil +} + func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) { s.mx.RLock() defer s.mx.RUnlock() @@ -193,16 +275,34 @@ func (s *BadgerMarkSet) tryDB(key []byte) (has bool, err error) { // writer holds the exclusive lock func (s *BadgerMarkSet) put(key string) (write bool, seqno int) { s.pend[key] = 
struct{}{} - if len(s.pend) < badgerMarkSetBatchSize { + if !s.persist && len(s.pend) < badgerMarkSetBatchSize { return false, 0 } - seqno = s.seqno + seqno = s.nextBatch() + return true, seqno +} + +func (s *BadgerMarkSet) putMany(batch []cid.Cid) (write bool, seqno int) { + for _, c := range batch { + key := string(c.Hash()) + s.pend[key] = struct{}{} + } + + if !s.persist && len(s.pend) < badgerMarkSetBatchSize { + return false, 0 + } + + seqno = s.nextBatch() + return true, seqno +} + +func (s *BadgerMarkSet) nextBatch() int { + seqno := s.seqno s.seqno++ s.writing[seqno] = s.pend s.pend = make(map[string]struct{}) - - return true, seqno + return seqno } func (s *BadgerMarkSet) write(seqno int) (err error) { @@ -247,6 +347,14 @@ func (s *BadgerMarkSet) write(seqno int) (err error) { return xerrors.Errorf("error flushing batch to badger markset: %w", err) } + s.mx.RLock() + persist := s.persist + s.mx.RUnlock() + + if persist { + return s.db.Sync() + } + return nil } @@ -266,26 +374,29 @@ func (s *BadgerMarkSet) Close() error { db := s.db s.db = nil - return closeTransientBadgerDB(db, s.path) + return closeBadgerDB(db, s.path, s.persist) } -func (s *BadgerMarkSet) SetConcurrent() {} +func openBadgerDB(path string, recover bool) (*badger.DB, error) { + // if it is not a recovery, clean up first + if !recover { + err := os.RemoveAll(path) + if err != nil { + return nil, xerrors.Errorf("error clearing markset directory: %w", err) + } -func openTransientBadgerDB(path string) (*badger.DB, error) { - // clean up first - err := os.RemoveAll(path) - if err != nil { - return nil, xerrors.Errorf("error clearing markset directory: %w", err) - } - - err = os.MkdirAll(path, 0755) //nolint:gosec - if err != nil { - return nil, xerrors.Errorf("error creating markset directory: %w", err) + err = os.MkdirAll(path, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } } opts := badger.DefaultOptions(path) + // we manually sync when we are in critical section opts.SyncWrites = false + // no need to do that opts.CompactL0OnClose = false + // we store hashes, not much to gain by compression opts.Compression = options.None // Note: We use FileIO for loading modes to avoid memory thrashing and interference // between the system blockstore and the markset. @@ -294,6 +405,15 @@ func openTransientBadgerDB(path string) (*badger.DB, error) { // exceeded 1GB in size. opts.TableLoadingMode = options.FileIO opts.ValueLogLoadingMode = options.FileIO + // We increase the number of L0 tables before compaction to make it unlikely to + // be necessary. 
+ opts.NumLevelZeroTables = 20 // default is 5 + opts.NumLevelZeroTablesStall = 30 // default is 10 + // increase the number of compactors from default 2 so that if we ever have to + // compact, it is fast + if runtime.NumCPU()/2 > opts.NumCompactors { + opts.NumCompactors = runtime.NumCPU() / 2 + } opts.Logger = &badgerLogger{ SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), @@ -302,12 +422,16 @@ func openTransientBadgerDB(path string) (*badger.DB, error) { return badger.Open(opts) } -func closeTransientBadgerDB(db *badger.DB, path string) error { +func closeBadgerDB(db *badger.DB, path string, persist bool) error { err := db.Close() if err != nil { return xerrors.Errorf("error closing badger markset: %w", err) } + if persist { + return nil + } + err = os.RemoveAll(path) if err != nil { return xerrors.Errorf("error deleting badger markset: %w", err) diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go index fda964663..8216bcd81 100644 --- a/blockstore/splitstore/markset_map.go +++ b/blockstore/splitstore/markset_map.go @@ -1,37 +1,104 @@ package splitstore import ( + "bufio" + "io" + "os" + "path/filepath" "sync" + "golang.org/x/xerrors" + cid "github.com/ipfs/go-cid" ) -type MapMarkSetEnv struct{} +type MapMarkSetEnv struct { + path string +} var _ MarkSetEnv = (*MapMarkSetEnv)(nil) type MapMarkSet struct { mx sync.RWMutex set map[string]struct{} + + persist bool + file *os.File + buf *bufio.Writer + + path string } var _ MarkSet = (*MapMarkSet)(nil) -func NewMapMarkSetEnv() (*MapMarkSetEnv, error) { - return &MapMarkSetEnv{}, nil +func NewMapMarkSetEnv(path string) (*MapMarkSetEnv, error) { + msPath := filepath.Join(path, "markset.map") + err := os.MkdirAll(msPath, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } + + return &MapMarkSetEnv{path: msPath}, nil } -func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { +func (e *MapMarkSetEnv) New(name string, sizeHint int64) (MarkSet, error) { + path := filepath.Join(e.path, name) return &MapMarkSet{ - set: make(map[string]struct{}, sizeHint), + set: make(map[string]struct{}, sizeHint), + path: path, }, nil } +func (e *MapMarkSetEnv) Recover(name string) (MarkSet, error) { + path := filepath.Join(e.path, name) + s := &MapMarkSet{ + set: make(map[string]struct{}), + path: path, + } + + in, err := os.Open(path) + if err != nil { + return nil, xerrors.Errorf("error opening markset file for read: %w", err) + } + defer in.Close() //nolint:errcheck + + // wrap a buffered reader to make this faster + buf := bufio.NewReader(in) + for { + var sz byte + if sz, err = buf.ReadByte(); err != nil { + break + } + + key := make([]byte, int(sz)) + if _, err = io.ReadFull(buf, key); err != nil { + break + } + + s.set[string(key)] = struct{}{} + } + + if err != io.EOF { + return nil, xerrors.Errorf("error reading markset file: %w", err) + } + + file, err := os.OpenFile(s.path, os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return nil, xerrors.Errorf("error opening markset file for write: %w", err) + } + + s.persist = true + s.file = file + s.buf = bufio.NewWriter(file) + + return s, nil +} + func (e *MapMarkSetEnv) Close() error { return nil } -func (s *MapMarkSet) Mark(cid cid.Cid) error { +func (s *MapMarkSet) BeginCriticalSection() error { s.mx.Lock() defer s.mx.Unlock() @@ -39,7 +106,104 @@ func (s *MapMarkSet) Mark(cid cid.Cid) error { return 
errMarkSetClosed } - s.set[string(cid.Hash())] = struct{}{} + if s.persist { + return nil + } + + file, err := os.OpenFile(s.path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return xerrors.Errorf("error opening markset file: %w", err) + } + + // wrap a buffered writer to make this faster + s.buf = bufio.NewWriter(file) + for key := range s.set { + if err := s.writeKey([]byte(key), false); err != nil { + _ = file.Close() + s.buf = nil + return err + } + } + if err := s.buf.Flush(); err != nil { + _ = file.Close() + s.buf = nil + return xerrors.Errorf("error flushing markset file buffer: %w", err) + } + + s.file = file + s.persist = true + + return nil +} + +func (s *MapMarkSet) EndCriticalSection() { + s.mx.Lock() + defer s.mx.Unlock() + + if !s.persist { + return + } + + _ = s.file.Close() + _ = os.Remove(s.path) + s.file = nil + s.buf = nil + s.persist = false +} + +func (s *MapMarkSet) Mark(c cid.Cid) error { + s.mx.Lock() + defer s.mx.Unlock() + + if s.set == nil { + return errMarkSetClosed + } + + hash := c.Hash() + s.set[string(hash)] = struct{}{} + + if s.persist { + if err := s.writeKey(hash, true); err != nil { + return err + } + + if err := s.file.Sync(); err != nil { + return xerrors.Errorf("error syncing markset: %w", err) + } + } + + return nil +} + +func (s *MapMarkSet) MarkMany(batch []cid.Cid) error { + s.mx.Lock() + defer s.mx.Unlock() + + if s.set == nil { + return errMarkSetClosed + } + + for _, c := range batch { + hash := c.Hash() + s.set[string(hash)] = struct{}{} + + if s.persist { + if err := s.writeKey(hash, false); err != nil { + return err + } + } + } + + if s.persist { + if err := s.buf.Flush(); err != nil { + return xerrors.Errorf("error flushing markset buffer to disk: %w", err) + } + + if err := s.file.Sync(); err != nil { + return xerrors.Errorf("error syncing markset: %w", err) + } + } + return nil } @@ -63,12 +227,23 @@ func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) { return false, errMarkSetClosed } - key := string(c.Hash()) + hash := c.Hash() + key := string(hash) if _, ok := s.set[key]; ok { return false, nil } s.set[key] = struct{}{} + + if s.persist { + if err := s.writeKey(hash, true); err != nil { + return false, err + } + if err := s.file.Sync(); err != nil { + return false, xerrors.Errorf("error syncing markset: %w", err) + } + } + return true, nil } @@ -76,6 +251,39 @@ func (s *MapMarkSet) Close() error { s.mx.Lock() defer s.mx.Unlock() + if s.set == nil { + return nil + } + s.set = nil + + if s.file != nil { + if err := s.file.Close(); err != nil { + log.Warnf("error closing markset file: %s", err) + } + + if !s.persist { + if err := os.Remove(s.path); err != nil { + log.Warnf("error removing markset file: %s", err) + } + } + } + + return nil +} + +func (s *MapMarkSet) writeKey(k []byte, flush bool) error { + if err := s.buf.WriteByte(byte(len(k))); err != nil { + return xerrors.Errorf("error writing markset key length to disk: %w", err) + } + if _, err := s.buf.Write(k); err != nil { + return xerrors.Errorf("error writing markset key to disk: %w", err) + } + if flush { + if err := s.buf.Flush(); err != nil { + return xerrors.Errorf("error flushing markset buffer to disk: %w", err) + } + } + return nil } diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index de9421f08..b4b871602 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -11,7 +11,10 @@ import ( func TestMapMarkSet(t *testing.T) { testMarkSet(t, "map") + testMarkSetRecovery(t, 
"map") + testMarkSetMarkMany(t, "map") testMarkSetVisitor(t, "map") + testMarkSetVisitorRecovery(t, "map") } func TestBadgerMarkSet(t *testing.T) { @@ -21,12 +24,13 @@ func TestBadgerMarkSet(t *testing.T) { badgerMarkSetBatchSize = bs }) testMarkSet(t, "badger") + testMarkSetRecovery(t, "badger") + testMarkSetMarkMany(t, "badger") testMarkSetVisitor(t, "badger") + testMarkSetVisitorRecovery(t, "badger") } func testMarkSet(t *testing.T, lsType string) { - t.Helper() - path, err := ioutil.TempDir("", "markset.*") if err != nil { t.Fatal(err) @@ -42,12 +46,12 @@ func testMarkSet(t *testing.T, lsType string) { } defer env.Close() //nolint:errcheck - hotSet, err := env.Create("hot", 0) + hotSet, err := env.New("hot", 0) if err != nil { t.Fatal(err) } - coldSet, err := env.Create("cold", 0) + coldSet, err := env.New("cold", 0) if err != nil { t.Fatal(err) } @@ -62,6 +66,7 @@ func testMarkSet(t *testing.T, lsType string) { } mustHave := func(s MarkSet, cid cid.Cid) { + t.Helper() has, err := s.Has(cid) if err != nil { t.Fatal(err) @@ -73,6 +78,7 @@ func testMarkSet(t *testing.T, lsType string) { } mustNotHave := func(s MarkSet, cid cid.Cid) { + t.Helper() has, err := s.Has(cid) if err != nil { t.Fatal(err) @@ -114,12 +120,12 @@ func testMarkSet(t *testing.T, lsType string) { t.Fatal(err) } - hotSet, err = env.Create("hot", 0) + hotSet, err = env.New("hot", 0) if err != nil { t.Fatal(err) } - coldSet, err = env.Create("cold", 0) + coldSet, err = env.New("cold", 0) if err != nil { t.Fatal(err) } @@ -150,8 +156,6 @@ func testMarkSet(t *testing.T, lsType string) { } func testMarkSetVisitor(t *testing.T, lsType string) { - t.Helper() - path, err := ioutil.TempDir("", "markset.*") if err != nil { t.Fatal(err) @@ -167,7 +171,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) { } defer env.Close() //nolint:errcheck - visitor, err := env.Create("test", 0) + visitor, err := env.New("test", 0) if err != nil { t.Fatal(err) } @@ -219,3 +223,322 @@ func testMarkSetVisitor(t *testing.T, lsType string) { mustNotVisit(visitor, k3) mustNotVisit(visitor, k4) } + +func testMarkSetVisitorRecovery(t *testing.T, lsType string) { + path, err := ioutil.TempDir("", "markset.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + visitor, err := env.New("test", 0) + if err != nil { + t.Fatal(err) + } + defer visitor.Close() //nolint:errcheck + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustVisit := func(v ObjectVisitor, cid cid.Cid) { + visit, err := v.Visit(cid) + if err != nil { + t.Fatal(err) + } + + if !visit { + t.Fatal("object should be visited") + } + } + + mustNotVisit := func(v ObjectVisitor, cid cid.Cid) { + visit, err := v.Visit(cid) + if err != nil { + t.Fatal(err) + } + + if visit { + t.Fatal("unexpected visit") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + mustVisit(visitor, k1) + mustVisit(visitor, k2) + + if err := visitor.BeginCriticalSection(); err != nil { + t.Fatal(err) + } + + mustVisit(visitor, k3) + mustVisit(visitor, k4) + + mustNotVisit(visitor, k1) + mustNotVisit(visitor, k2) + mustNotVisit(visitor, k3) + mustNotVisit(visitor, k4) + + if err := visitor.Close(); err != nil { + t.Fatal(err) + } + + visitor, err = env.Recover("test") + if err != 
nil { + t.Fatal(err) + } + + mustNotVisit(visitor, k1) + mustNotVisit(visitor, k2) + mustNotVisit(visitor, k3) + mustNotVisit(visitor, k4) + + visitor.EndCriticalSection() + + if err := visitor.Close(); err != nil { + t.Fatal(err) + } + + _, err = env.Recover("test") + if err == nil { + t.Fatal("expected recovery to fail") + } +} + +func testMarkSetRecovery(t *testing.T, lsType string) { + path, err := ioutil.TempDir("", "markset.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + markSet, err := env.New("test", 0) + if err != nil { + t.Fatal(err) + } + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("mark not found") + } + } + + mustNotHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unexpected mark") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + if err := markSet.Mark(k1); err != nil { + t.Fatal(err) + } + if err := markSet.Mark(k2); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustNotHave(markSet, k3) + mustNotHave(markSet, k4) + + if err := markSet.BeginCriticalSection(); err != nil { + t.Fatal(err) + } + + if err := markSet.Mark(k3); err != nil { + t.Fatal(err) + } + if err := markSet.Mark(k4); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + markSet, err = env.Recover("test") + if err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + markSet.EndCriticalSection() + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + _, err = env.Recover("test") + if err == nil { + t.Fatal("expected recovery to fail") + } +} + +func testMarkSetMarkMany(t *testing.T, lsType string) { + path, err := ioutil.TempDir("", "markset.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + env, err := OpenMarkSetEnv(path, lsType) + if err != nil { + t.Fatal(err) + } + defer env.Close() //nolint:errcheck + + markSet, err := env.New("test", 0) + if err != nil { + t.Fatal(err) + } + + makeCid := func(key string) cid.Cid { + h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1) + if err != nil { + t.Fatal(err) + } + + return cid.NewCidV1(cid.Raw, h) + } + + mustHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatal("mark not found") + } + } + + mustNotHave := func(s MarkSet, cid cid.Cid) { + t.Helper() + has, err := s.Has(cid) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("unexpected mark") + } + } + + k1 := makeCid("a") + k2 := makeCid("b") + k3 := makeCid("c") + k4 := makeCid("d") + + if err := markSet.MarkMany([]cid.Cid{k1, k2}); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustNotHave(markSet, k3) + mustNotHave(markSet, k4) + + if err := markSet.BeginCriticalSection(); err != nil { 
+ t.Fatal(err) + } + + if err := markSet.MarkMany([]cid.Cid{k3, k4}); err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + markSet, err = env.Recover("test") + if err != nil { + t.Fatal(err) + } + + mustHave(markSet, k1) + mustHave(markSet, k2) + mustHave(markSet, k3) + mustHave(markSet, k4) + + markSet.EndCriticalSection() + + if err := markSet.Close(); err != nil { + t.Fatal(err) + } + + _, err = env.Recover("test") + if err == nil { + t.Fatal("expected recovery to fail") + } +} diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 62cb2459e..a351df76a 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -129,8 +129,6 @@ type SplitStore struct { headChangeMx sync.Mutex - coldPurgeSize int - chain ChainAccessor ds dstore.Datastore cold bstore.Blockstore @@ -158,6 +156,17 @@ type SplitStore struct { txnRefsMx sync.Mutex txnRefs map[cid.Cid]struct{} txnMissing map[cid.Cid]struct{} + txnMarkSet MarkSet + txnSyncMx sync.Mutex + txnSyncCond sync.Cond + txnSync bool + + // background cold object reification + reifyWorkers sync.WaitGroup + reifyMx sync.Mutex + reifyCond sync.Cond + reifyPend map[cid.Cid]struct{} + reifyInProgress map[cid.Cid]struct{} // registered protectors protectors []func(func(cid.Cid) error) error @@ -194,13 +203,16 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co cold: cold, hot: hots, markSetEnv: markSetEnv, - - coldPurgeSize: defaultColdPurgeSize, } ss.txnViewsCond.L = &ss.txnViewsMx + ss.txnSyncCond.L = &ss.txnSyncMx ss.ctx, ss.cancel = context.WithCancel(context.Background()) + ss.reifyCond.L = &ss.reifyMx + ss.reifyPend = make(map[cid.Cid]struct{}) + ss.reifyInProgress = make(map[cid.Cid]struct{}) + if enableDebugLog { ss.debug, err = openDebugLog(path) if err != nil { @@ -208,6 +220,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co } } + if ss.checkpointExists() { + log.Info("found compaction checkpoint; resuming compaction") + if err := ss.completeCompaction(); err != nil { + markSetEnv.Close() //nolint:errcheck + return nil, xerrors.Errorf("error resuming compaction: %w", err) + } + } + return ss, nil } @@ -230,6 +250,20 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() + // critical section + if s.txnMarkSet != nil { + has, err := s.txnMarkSet.Has(cid) + if err != nil { + return false, err + } + + if has { + return s.has(cid) + } + + return s.cold.Has(ctx, cid) + } + has, err := s.hot.Has(ctx, cid) if err != nil { @@ -241,7 +275,13 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) { return true, nil } - return s.cold.Has(ctx, cid) + has, err = s.cold.Has(ctx, cid) + if has && bstore.IsHotView(ctx) { + s.reifyColdObject(cid) + } + + return has, err + } func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { @@ -257,6 +297,20 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) s.txnLk.RLock() defer s.txnLk.RUnlock() + // critical section + if s.txnMarkSet != nil { + has, err := s.txnMarkSet.Has(cid) + if err != nil { + return nil, err + } + + if has { + return s.get(cid) + } + + return s.cold.Get(ctx, cid) + } + blk, err := s.hot.Get(ctx, cid) switch err { @@ -271,8 +325,11 @@ func (s *SplitStore) Get(ctx context.Context, cid 
cid.Cid) (blocks.Block, error) blk, err = s.cold.Get(ctx, cid) if err == nil { - stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) + if bstore.IsHotView(ctx) { + s.reifyColdObject(cid) + } + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } return blk, err @@ -294,6 +351,20 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() + // critical section + if s.txnMarkSet != nil { + has, err := s.txnMarkSet.Has(cid) + if err != nil { + return 0, err + } + + if has { + return s.getSize(cid) + } + + return s.cold.GetSize(ctx, cid) + } + size, err := s.hot.GetSize(ctx, cid) switch err { @@ -308,6 +379,10 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { size, err = s.cold.GetSize(ctx, cid) if err == nil { + if bstore.IsHotView(ctx) { + s.reifyColdObject(cid) + } + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } return size, err @@ -332,6 +407,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error { s.debug.LogWrite(blk) + // critical section + if s.txnMarkSet != nil { + s.markLiveRefs([]cid.Cid{blk.Cid()}) + return nil + } + s.trackTxnRef(blk.Cid()) return nil } @@ -377,6 +458,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { s.debug.LogWriteMany(blks) + // critical section + if s.txnMarkSet != nil { + s.markLiveRefs(batch) + return nil + } + s.trackTxnRefMany(batch) return nil } @@ -436,6 +523,23 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro return cb(data) } + // critical section + s.txnLk.RLock() // the lock is released in protectView if we are not in critical section + if s.txnMarkSet != nil { + has, err := s.txnMarkSet.Has(cid) + s.txnLk.RUnlock() + + if err != nil { + return err + } + + if has { + return s.view(cid, cb) + } + + return s.cold.View(ctx, cid, cb) + } + // views are (optimistically) protected two-fold: // - if there is an active transaction, then the reference is protected. 
// - if there is no active transaction, active views are tracked in a @@ -456,6 +560,10 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro err = s.cold.View(ctx, cid, cb) if err == nil { + if bstore.IsHotView(ctx) { + s.reifyColdObject(cid) + } + stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } return err @@ -565,6 +673,9 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error } } + // spawn the reifier + go s.reifyOrchestrator() + // watch the chain chain.SubscribeHeadChanges(s.HeadChange) @@ -585,12 +696,19 @@ func (s *SplitStore) Close() error { } if atomic.LoadInt32(&s.compacting) == 1 { + s.txnSyncMx.Lock() + s.txnSync = true + s.txnSyncCond.Broadcast() + s.txnSyncMx.Unlock() + log.Warn("close with ongoing compaction in progress; waiting for it to finish...") for atomic.LoadInt32(&s.compacting) == 1 { time.Sleep(time.Second) } } + s.reifyCond.Broadcast() + s.reifyWorkers.Wait() s.cancel() return multierr.Combine(s.markSetEnv.Close(), s.debug.Close()) } diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 0b4cfe044..d7c9b2ef9 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -89,7 +89,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { coldCnt := new(int64) missingCnt := new(int64) - visitor, err := s.markSetEnv.Create("check", 0) + visitor, err := s.markSetEnv.New("check", 0) if err != nil { return xerrors.Errorf("error creating visitor: %w", err) } diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 20f99af35..ae123abc9 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -3,8 +3,9 @@ package splitstore import ( "bytes" "errors" + "os" + "path/filepath" "runtime" - "sort" "sync" "sync/atomic" "time" @@ -48,6 +49,10 @@ var ( // SyncGapTime is the time delay from a tipset's min timestamp before we decide // there is a sync gap SyncGapTime = time.Minute + + // SyncWaitTime is the time delay from a tipset's min timestamp before we decide + // we have synced. + SyncWaitTime = 30 * time.Second ) var ( @@ -57,8 +62,6 @@ var ( const ( batchSize = 16384 - - defaultColdPurgeSize = 7_000_000 ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { @@ -141,9 +144,9 @@ func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool { // transactionally protect incoming tipsets func (s *SplitStore) protectTipSets(apply []*types.TipSet) { s.txnLk.RLock() - defer s.txnLk.RUnlock() if !s.txnActive { + s.txnLk.RUnlock() return } @@ -152,12 +155,115 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) { cids = append(cids, ts.Cids()...) 
} + if len(cids) == 0 { + s.txnLk.RUnlock() + return + } + + // critical section + if s.txnMarkSet != nil { + curTs := apply[len(apply)-1] + timestamp := time.Unix(int64(curTs.MinTimestamp()), 0) + doSync := time.Since(timestamp) < SyncWaitTime + go func() { + if doSync { + defer func() { + s.txnSyncMx.Lock() + defer s.txnSyncMx.Unlock() + s.txnSync = true + s.txnSyncCond.Broadcast() + }() + } + defer s.txnLk.RUnlock() + s.markLiveRefs(cids) + + }() + return + } + s.trackTxnRefMany(cids) + s.txnLk.RUnlock() +} + +func (s *SplitStore) markLiveRefs(cids []cid.Cid) { + log.Debugf("marking %d live refs", len(cids)) + startMark := time.Now() + + count := new(int32) + visitor := newConcurrentVisitor() + walkObject := func(c cid.Cid) error { + return s.walkObjectIncomplete(c, visitor, + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + visit, err := s.txnMarkSet.Visit(c) + if err != nil { + return xerrors.Errorf("error visiting object: %w", err) + } + + if !visit { + return errStopWalk + } + + atomic.AddInt32(count, 1) + return nil + }, + func(missing cid.Cid) error { + log.Warnf("missing object reference %s in %s", missing, c) + return errStopWalk + }) + } + + // optimize the common case of single put + if len(cids) == 1 { + if err := walkObject(cids[0]); err != nil { + log.Errorf("error marking tipset refs: %s", err) + } + log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) + return + } + + workch := make(chan cid.Cid, len(cids)) + for _, c := range cids { + workch <- c + } + close(workch) + + worker := func() error { + for c := range workch { + if err := walkObject(c); err != nil { + return err + } + } + + return nil + } + + workers := runtime.NumCPU() / 2 + if workers < 2 { + workers = 2 + } + if workers > len(cids) { + workers = len(cids) + } + + g := new(errgroup.Group) + for i := 0; i < workers; i++ { + g.Go(worker) + } + + if err := g.Wait(); err != nil { + log.Errorf("error marking tipset refs: %s", err) + } + + log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count) } // transactionally protect a view func (s *SplitStore) protectView(c cid.Cid) { - s.txnLk.RLock() + // the txnLk is held for read defer s.txnLk.RUnlock() if s.txnActive { @@ -387,6 +493,12 @@ func (s *SplitStore) compact(curTs *types.TipSet) { } func (s *SplitStore) doCompact(curTs *types.TipSet) error { + if s.checkpointExists() { + // this really shouldn't happen, but if it somehow does, it means that the hotstore + // might be potentially inconsistent; abort compaction and notify the user to intervene. + return xerrors.Errorf("checkpoint exists; aborting compaction") + } + currentEpoch := curTs.Height() boundaryEpoch := currentEpoch - CompactionBoundary @@ -398,7 +510,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex) - markSet, err := s.markSetEnv.Create("live", s.markSetSize) + markSet, err := s.markSetEnv.New("live", s.markSetSize) if err != nil { return xerrors.Errorf("error creating mark set: %w", err) } @@ -409,9 +521,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return err } - // we are ready for concurrent marking - s.beginTxnMarking(markSet) - // 0. 
track all protected references at beginning of compaction; anything added later should // be transactionally protected by the write log.Info("protecting references with registered protectors") @@ -425,7 +534,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Info("marking reachable objects") startMark := time.Now() - var count int64 + count := new(int64) err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, func(c cid.Cid) error { if isUnitaryObject(c) { @@ -441,7 +550,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return errStopWalk } - count++ + atomic.AddInt64(count, 1) return nil }) @@ -449,9 +558,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error marking: %w", err) } - s.markSetSize = count + count>>2 // overestimate a bit + s.markSetSize = *count + *count>>2 // overestimate a bit - log.Infow("marking done", "took", time.Since(startMark), "marked", count) + log.Infow("marking done", "took", time.Since(startMark), "marked", *count) if err := s.checkClosing(); err != nil { return err @@ -471,10 +580,15 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Info("collecting cold objects") startCollect := time.Now() + coldw, err := NewColdSetWriter(s.coldSetPath()) + if err != nil { + return xerrors.Errorf("error creating coldset: %w", err) + } + defer coldw.Close() //nolint:errcheck + // some stats for logging var hotCnt, coldCnt int - cold := make([]cid.Cid, 0, s.coldPurgeSize) err = s.hot.ForEachKey(func(c cid.Cid) error { // was it marked? mark, err := markSet.Has(c) @@ -488,7 +602,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { } // it's cold, mark it as candidate for move - cold = append(cold, c) + if err := coldw.Write(c); err != nil { + return xerrors.Errorf("error writing cid to coldstore: %w", err) + } coldCnt++ return nil @@ -498,12 +614,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error collecting cold objects: %w", err) } - log.Infow("cold collection done", "took", time.Since(startCollect)) - - if coldCnt > 0 { - s.coldPurgeSize = coldCnt + coldCnt>>2 // overestimate a bit + if err := coldw.Close(); err != nil { + return xerrors.Errorf("error closing coldset: %w", err) } + log.Infow("cold collection done", "took", time.Since(startCollect)) + log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt) stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt))) stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt))) @@ -521,11 +637,17 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return err } + coldr, err := NewColdSetReader(s.coldSetPath()) + if err != nil { + return xerrors.Errorf("error opening coldset: %w", err) + } + defer coldr.Close() //nolint:errcheck + // 3. copy the cold objects to the coldstore -- if we have one if !s.cfg.DiscardColdBlocks { log.Info("moving cold objects to the coldstore") startMove := time.Now() - err = s.moveColdBlocks(cold) + err = s.moveColdBlocks(coldr) if err != nil { return xerrors.Errorf("error moving cold objects: %w", err) } @@ -534,41 +656,64 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { if err := s.checkClosing(); err != nil { return err } + + if err := coldr.Reset(); err != nil { + return xerrors.Errorf("error resetting coldset: %w", err) + } } - // 4. 
sort cold objects so that the dags with most references are deleted first - // this ensures that we can't refer to a dag with its consituents already deleted, ie - // we lave no dangling references. - log.Info("sorting cold objects") - startSort := time.Now() - err = s.sortObjects(cold) - if err != nil { - return xerrors.Errorf("error sorting objects: %w", err) - } - log.Infow("sorting done", "took", time.Since(startSort)) - - // 4.1 protect transactional refs once more - // strictly speaking, this is not necessary as purge will do it before deleting each - // batch. however, there is likely a largish number of references accumulated during - // ths sort and this protects before entering pruge context. - err = s.protectTxnRefs(markSet) - if err != nil { - return xerrors.Errorf("error protecting transactional refs: %w", err) + // 4. Purge cold objects with checkpointing for recovery. + // This is the critical section of compaction, whereby any cold object not in the markSet is + // considered already deleted. + // We delete cold objects in batches, holding the transaction lock, where we check the markSet + // again for new references created by the VM. + // After each batch, we write a checkpoint to disk; if the process is interrupted before completion, + // the process will continue from the checkpoint in the next recovery. + if err := s.beginCriticalSection(markSet); err != nil { + return xerrors.Errorf("error beginning critical section: %w", err) } if err := s.checkClosing(); err != nil { return err } + // wait for the head to catch up so that the current tipset is marked + s.waitForSync() + + if err := s.checkClosing(); err != nil { + return err + } + + checkpoint, err := NewCheckpoint(s.checkpointPath()) + if err != nil { + return xerrors.Errorf("error creating checkpoint: %w", err) + } + defer checkpoint.Close() //nolint:errcheck + // 5. 
purge cold objects from the hotstore, taking protected references into account log.Info("purging cold objects from the hotstore") startPurge := time.Now() - err = s.purge(cold, markSet) + err = s.purge(coldr, checkpoint, markSet) if err != nil { - return xerrors.Errorf("error purging cold blocks: %w", err) + return xerrors.Errorf("error purging cold objects: %w", err) } log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) + s.endCriticalSection() + + if err := checkpoint.Close(); err != nil { + log.Warnf("error closing checkpoint: %s", err) + } + if err := os.Remove(s.checkpointPath()); err != nil { + log.Warnf("error removing checkpoint: %s", err) + } + if err := coldr.Close(); err != nil { + log.Warnf("error closing coldset: %s", err) + } + if err := os.Remove(s.coldSetPath()); err != nil { + log.Warnf("error removing coldset: %s", err) + } + // we are done; do some housekeeping s.endTxnProtect() s.gcHotstore() @@ -599,12 +744,51 @@ func (s *SplitStore) beginTxnProtect() { defer s.txnLk.Unlock() s.txnActive = true + s.txnSync = false s.txnRefs = make(map[cid.Cid]struct{}) s.txnMissing = make(map[cid.Cid]struct{}) } -func (s *SplitStore) beginTxnMarking(markSet MarkSet) { - log.Info("beginning transactional marking") +func (s *SplitStore) beginCriticalSection(markSet MarkSet) error { + log.Info("beginning critical section") + + // do that once first to get the bulk before the markset is in critical section + if err := s.protectTxnRefs(markSet); err != nil { + return xerrors.Errorf("error protecting transactional references: %w", err) + } + + if err := markSet.BeginCriticalSection(); err != nil { + return xerrors.Errorf("error beginning critical section for markset: %w", err) + } + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + s.txnMarkSet = markSet + + // and do it again while holding the lock to mark references that might have been created + // in the meantime and avoid races of the type Has->txnRef->enterCS->Get fails because + // it's not in the markset + if err := s.protectTxnRefs(markSet); err != nil { + return xerrors.Errorf("error protecting transactional references: %w", err) + } + + return nil +} + +func (s *SplitStore) waitForSync() { + log.Info("waiting for sync") + startWait := time.Now() + defer func() { + log.Infow("waiting for sync done", "took", time.Since(startWait)) + }() + + s.txnSyncMx.Lock() + defer s.txnSyncMx.Unlock() + + for !s.txnSync { + s.txnSyncCond.Wait() + } } func (s *SplitStore) endTxnProtect() { @@ -616,8 +800,20 @@ func (s *SplitStore) endTxnProtect() { } s.txnActive = false + s.txnSync = false s.txnRefs = nil s.txnMissing = nil + s.txnMarkSet = nil +} + +func (s *SplitStore) endCriticalSection() { + log.Info("ending critical section") + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + s.txnMarkSet.EndCriticalSection() + s.txnMarkSet = nil } func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, @@ -857,7 +1053,7 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m return nil } -// internal version used by walk +// internal version used during compaction and related operations func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) @@ -892,10 +1088,34 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) { return s.cold.Has(s.ctx, c) } -func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { +func (s *SplitStore) get(c cid.Cid) (blocks.Block, error) { + blk, err := s.hot.Get(s.ctx, c) + switch err 
{ + case nil: + return blk, nil + case bstore.ErrNotFound: + return s.cold.Get(s.ctx, c) + default: + return nil, err + } +} + +func (s *SplitStore) getSize(c cid.Cid) (int, error) { + sz, err := s.hot.GetSize(s.ctx, c) + switch err { + case nil: + return sz, nil + case bstore.ErrNotFound: + return s.cold.GetSize(s.ctx, c) + default: + return 0, err + } +} + +func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error { batch := make([]blocks.Block, 0, batchSize) - for _, c := range cold { + err := coldr.ForEach(func(c cid.Cid) error { if err := s.checkClosing(); err != nil { return err } @@ -904,7 +1124,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { if err != nil { if err == bstore.ErrNotFound { log.Warnf("hotstore missing block %s", c) - continue + return nil } return xerrors.Errorf("error retrieving block %s from hotstore: %w", c, err) @@ -918,6 +1138,12 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { } batch = batch[:0] } + + return nil + }) + + if err != nil { + return xerrors.Errorf("error iterating coldset: %w", err) } if len(batch) > 0 { @@ -930,177 +1156,202 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { return nil } -// sorts a slice of objects heaviest first -- it's a little expensive but worth the -// guarantee that we don't leave dangling references behind, e.g. if we die in the middle -// of a purge. -func (s *SplitStore) sortObjects(cids []cid.Cid) error { - // we cache the keys to avoid making a gazillion of strings - keys := make(map[cid.Cid]string) - key := func(c cid.Cid) string { - s, ok := keys[c] - if !ok { - s = string(c.Hash()) - keys[c] = s - } - return s - } - - // compute sorting weights as the cumulative number of DAG links - weights := make(map[string]int) - for _, c := range cids { - // this can take quite a while, so check for shutdown with every opportunity - if err := s.checkClosing(); err != nil { - return err - } - - w := s.getObjectWeight(c, weights, key) - weights[key(c)] = w - } - - // sort! 
- sort.Slice(cids, func(i, j int) bool { - wi := weights[key(cids[i])] - wj := weights[key(cids[j])] - if wi == wj { - return bytes.Compare(cids[i].Hash(), cids[j].Hash()) > 0 - } - - return wi > wj - }) - - return nil -} - -func (s *SplitStore) getObjectWeight(c cid.Cid, weights map[string]int, key func(cid.Cid) string) int { - w, ok := weights[key(c)] - if ok { - return w - } - - // we treat block headers specially to avoid walking the entire chain - var hdr types.BlockHeader - err := s.view(c, func(data []byte) error { - return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) - }) - if err == nil { - w1 := s.getObjectWeight(hdr.ParentStateRoot, weights, key) - weights[key(hdr.ParentStateRoot)] = w1 - - w2 := s.getObjectWeight(hdr.Messages, weights, key) - weights[key(hdr.Messages)] = w2 - - return 1 + w1 + w2 - } - - var links []cid.Cid - err = s.view(c, func(data []byte) error { - return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { - links = append(links, c) - }) - }) - if err != nil { - return 1 - } - - w = 1 - for _, c := range links { - // these are internal refs, so dags will be dags - if c.Prefix().Codec != cid.DagCBOR { - w++ - continue - } - - wc := s.getObjectWeight(c, weights, key) - weights[key(c)] = wc - - w += wc - } - - return w -} - -func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) error) error { - if len(cids) == 0 { - return nil - } - - // we don't delete one giant batch of millions of objects, but rather do smaller batches - // so that we don't stop the world for an extended period of time - done := false - for i := 0; !done; i++ { - start := i * batchSize - end := start + batchSize - if end >= len(cids) { - end = len(cids) - done = true - } - - err := deleteBatch(cids[start:end]) - if err != nil { - return xerrors.Errorf("error deleting batch: %w", err) - } - } - - return nil -} - -func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error { +func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet MarkSet) error { + batch := make([]cid.Cid, 0, batchSize) deadCids := make([]cid.Cid, 0, batchSize) + var purgeCnt, liveCnt int defer func() { log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) }() - return s.purgeBatch(cids, - func(cids []cid.Cid) error { - deadCids := deadCids[:0] + deleteBatch := func() error { + pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet) - for { - if err := s.checkClosing(); err != nil { - return err - } + purgeCnt += pc + liveCnt += lc + batch = batch[:0] - s.txnLk.Lock() - if len(s.txnRefs) == 0 { - // keep the lock! 
- break - } + return err + } - // unlock and protect - s.txnLk.Unlock() + err := coldr.ForEach(func(c cid.Cid) error { + batch = append(batch, c) + if len(batch) == batchSize { + return deleteBatch() + } - err := s.protectTxnRefs(markSet) - if err != nil { - return xerrors.Errorf("error protecting transactional refs: %w", err) - } + return nil + }) + + if err != nil { + return err + } + + if len(batch) > 0 { + return deleteBatch() + } + + return nil +} + +func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoint, markSet MarkSet) (purgeCnt int, liveCnt int, err error) { + if err := s.checkClosing(); err != nil { + return 0, 0, err + } + + s.txnLk.Lock() + defer s.txnLk.Unlock() + + for _, c := range batch { + has, err := markSet.Has(c) + if err != nil { + return 0, 0, xerrors.Errorf("error checking markset for liveness: %w", err) + } + + if has { + liveCnt++ + continue + } + + deadCids = append(deadCids, c) + } + + if len(deadCids) == 0 { + if err := checkpoint.Set(batch[len(batch)-1]); err != nil { + return 0, 0, xerrors.Errorf("error setting checkpoint: %w", err) + } + + return 0, liveCnt, nil + } + + if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil { + return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err) + } + + s.debug.LogDelete(deadCids) + purgeCnt = len(deadCids) + + if err := checkpoint.Set(batch[len(batch)-1]); err != nil { + return purgeCnt, liveCnt, xerrors.Errorf("error setting checkpoint: %w", err) + } + + return purgeCnt, liveCnt, nil +} + +func (s *SplitStore) coldSetPath() string { + return filepath.Join(s.path, "coldset") +} + +func (s *SplitStore) checkpointPath() string { + return filepath.Join(s.path, "checkpoint") +} + +func (s *SplitStore) checkpointExists() bool { + _, err := os.Stat(s.checkpointPath()) + return err == nil +} + +func (s *SplitStore) completeCompaction() error { + checkpoint, last, err := OpenCheckpoint(s.checkpointPath()) + if err != nil { + return xerrors.Errorf("error opening checkpoint: %w", err) + } + defer checkpoint.Close() //nolint:errcheck + + coldr, err := NewColdSetReader(s.coldSetPath()) + if err != nil { + return xerrors.Errorf("error opening coldset: %w", err) + } + defer coldr.Close() //nolint:errcheck + + markSet, err := s.markSetEnv.Recover("live") + if err != nil { + return xerrors.Errorf("error recovering markset: %w", err) + } + defer markSet.Close() //nolint:errcheck + + // PURGE + log.Info("purging cold objects from the hotstore") + startPurge := time.Now() + err = s.completePurge(coldr, checkpoint, last, markSet) + if err != nil { + return xerrors.Errorf("error purging cold objects: %w", err) + } + log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge)) + + markSet.EndCriticalSection() + + if err := checkpoint.Close(); err != nil { + log.Warnf("error closing checkpoint: %s", err) + } + if err := os.Remove(s.checkpointPath()); err != nil { + log.Warnf("error removing checkpoint: %s", err) + } + if err := coldr.Close(); err != nil { + log.Warnf("error closing coldset: %s", err) + } + if err := os.Remove(s.coldSetPath()); err != nil { + log.Warnf("error removing coldset: %s", err) + } + + // Note: at this point we can start the splitstore; a compaction should run on + // the first head change, which will trigger gc on the hotstore. + // We don't mind the second (back-to-back) compaction as the head will + // have advanced during marking and coldset accumulation. 
+ return nil +} + +func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint, start cid.Cid, markSet MarkSet) error { + if !start.Defined() { + return s.purge(coldr, checkpoint, markSet) + } + + seeking := true + batch := make([]cid.Cid, 0, batchSize) + deadCids := make([]cid.Cid, 0, batchSize) + + var purgeCnt, liveCnt int + defer func() { + log.Infow("purged cold objects", "purged", purgeCnt, "live", liveCnt) + }() + + deleteBatch := func() error { + pc, lc, err := s.purgeBatch(batch, deadCids, checkpoint, markSet) + + purgeCnt += pc + liveCnt += lc + batch = batch[:0] + + return err + } + + err := coldr.ForEach(func(c cid.Cid) error { + if seeking { + if start.Equals(c) { + seeking = false } - defer s.txnLk.Unlock() - - for _, c := range cids { - live, err := markSet.Has(c) - if err != nil { - return xerrors.Errorf("error checking for liveness: %w", err) - } - - if live { - liveCnt++ - continue - } - - deadCids = append(deadCids, c) - } - - err := s.hot.DeleteMany(s.ctx, deadCids) - if err != nil { - return xerrors.Errorf("error purging cold objects: %w", err) - } - - s.debug.LogDelete(deadCids) - - purgeCnt += len(deadCids) return nil - }) + } + + batch = append(batch, c) + if len(batch) == batchSize { + return deleteBatch() + } + + return nil + }) + + if err != nil { + return err + } + + if len(batch) > 0 { + return deleteBatch() + } + + return nil } // I really don't like having this code, but we seem to have some occasional DAG references with diff --git a/blockstore/splitstore/splitstore_reify.go b/blockstore/splitstore/splitstore_reify.go new file mode 100644 index 000000000..85c4fa289 --- /dev/null +++ b/blockstore/splitstore/splitstore_reify.go @@ -0,0 +1,214 @@ +package splitstore + +import ( + "errors" + "runtime" + "sync/atomic" + + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +var ( + errReifyLimit = errors.New("reification limit reached") + ReifyLimit = 16384 +) + +func (s *SplitStore) reifyColdObject(c cid.Cid) { + if !s.isWarm() { + return + } + + if isUnitaryObject(c) { + return + } + + s.reifyMx.Lock() + defer s.reifyMx.Unlock() + + _, ok := s.reifyInProgress[c] + if ok { + return + } + + s.reifyPend[c] = struct{}{} + s.reifyCond.Broadcast() +} + +func (s *SplitStore) reifyOrchestrator() { + workers := runtime.NumCPU() / 4 + if workers < 2 { + workers = 2 + } + + workch := make(chan cid.Cid, workers) + defer close(workch) + + for i := 0; i < workers; i++ { + s.reifyWorkers.Add(1) + go s.reifyWorker(workch) + } + + for { + s.reifyMx.Lock() + for len(s.reifyPend) == 0 && atomic.LoadInt32(&s.closing) == 0 { + s.reifyCond.Wait() + } + + if atomic.LoadInt32(&s.closing) != 0 { + s.reifyMx.Unlock() + return + } + + reifyPend := s.reifyPend + s.reifyPend = make(map[cid.Cid]struct{}) + s.reifyMx.Unlock() + + for c := range reifyPend { + select { + case workch <- c: + case <-s.ctx.Done(): + return + } + } + } +} + +func (s *SplitStore) reifyWorker(workch chan cid.Cid) { + defer s.reifyWorkers.Done() + for c := range workch { + s.doReify(c) + } +} + +func (s *SplitStore) doReify(c cid.Cid) { + var toreify, totrack, toforget []cid.Cid + + defer func() { + s.reifyMx.Lock() + defer s.reifyMx.Unlock() + + for _, c := range toreify { + delete(s.reifyInProgress, c) + } + for _, c := range totrack { + delete(s.reifyInProgress, c) + } + for _, c := range toforget { + delete(s.reifyInProgress, c) + } + }() + + s.txnLk.RLock() + defer s.txnLk.RUnlock() + + count := 0 + err := s.walkObjectIncomplete(c, 
newTmpVisitor(), + func(c cid.Cid) error { + if isUnitaryObject(c) { + return errStopWalk + } + + count++ + if count > ReifyLimit { + return errReifyLimit + } + + s.reifyMx.Lock() + _, inProgress := s.reifyInProgress[c] + if !inProgress { + s.reifyInProgress[c] = struct{}{} + } + s.reifyMx.Unlock() + + if inProgress { + return errStopWalk + } + + has, err := s.hot.Has(s.ctx, c) + if err != nil { + return xerrors.Errorf("error checking hotstore: %w", err) + } + + if has { + if s.txnMarkSet != nil { + hasMark, err := s.txnMarkSet.Has(c) + if err != nil { + log.Warnf("error checking markset: %s", err) + } else if hasMark { + toforget = append(toforget, c) + return errStopWalk + } + } else { + totrack = append(totrack, c) + return errStopWalk + } + } + + toreify = append(toreify, c) + return nil + }, + func(missing cid.Cid) error { + log.Warnf("missing reference while reifying %s: %s", c, missing) + return errStopWalk + }) + + if err != nil { + if xerrors.Is(err, errReifyLimit) { + log.Debug("reification aborted; reify limit reached") + return + } + + log.Warnf("error walking cold object for reification (cid: %s): %s", c, err) + return + } + + log.Debugf("reifying %d objects rooted at %s", len(toreify), c) + + // this should not get too big, maybe some 100s of objects. + batch := make([]blocks.Block, 0, len(toreify)) + for _, c := range toreify { + blk, err := s.cold.Get(s.ctx, c) + if err != nil { + log.Warnf("error retrieving cold object for reification (cid: %s): %s", c, err) + continue + } + + if err := s.checkClosing(); err != nil { + return + } + + batch = append(batch, blk) + } + + if len(batch) > 0 { + err = s.hot.PutMany(s.ctx, batch) + if err != nil { + log.Warnf("error reifying cold object (cid: %s): %s", c, err) + return + } + } + + if s.txnMarkSet != nil { + if len(toreify) > 0 { + if err := s.txnMarkSet.MarkMany(toreify); err != nil { + log.Warnf("error marking reified objects: %s", err) + } + } + if len(totrack) > 0 { + if err := s.txnMarkSet.MarkMany(totrack); err != nil { + log.Warnf("error marking tracked objects: %s", err) + } + } + } else { + // if txnActive is false these are noops + if len(toreify) > 0 { + s.trackTxnRefMany(toreify) + } + if len(totrack) > 0 { + s.trackTxnRefMany(totrack) + } + } +} diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index 7d84e0a4c..ee30400a4 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "io/ioutil" + "math/rand" + "os" "sync" "sync/atomic" "testing" @@ -20,12 +23,14 @@ import ( datastore "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" logging "github.com/ipfs/go-log/v2" + mh "github.com/multiformats/go-multihash" ) func init() { CompactionThreshold = 5 CompactionBoundary = 2 WarmupBoundary = 0 + SyncWaitTime = time.Millisecond logging.SetLogLevel("splitstore", "DEBUG") } @@ -80,8 +85,17 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } + path, err := ioutil.TempDir("", "splitstore.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + // open the splitstore - ss, err := Open("", ds, hot, cold, cfg) + ss, err := Open(path, ds, hot, cold, cfg) if err != nil { t.Fatal(err) } @@ -125,6 +139,10 @@ func testSplitStore(t *testing.T, cfg *Config) { } waitForCompaction := func() { + ss.txnSyncMx.Lock() + ss.txnSync = true + ss.txnSyncCond.Broadcast() + ss.txnSyncMx.Unlock() for atomic.LoadInt32(&ss.compacting) == 
1 { time.Sleep(100 * time.Millisecond) } @@ -259,8 +277,17 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { t.Fatal(err) } + path, err := ioutil.TempDir("", "splitstore.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + // open the splitstore - ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"}) + ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) if err != nil { t.Fatal(err) } @@ -305,6 +332,10 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { } waitForCompaction := func() { + ss.txnSyncMx.Lock() + ss.txnSync = true + ss.txnSyncCond.Broadcast() + ss.txnSyncMx.Unlock() for atomic.LoadInt32(&ss.compacting) == 1 { time.Sleep(100 * time.Millisecond) } @@ -357,6 +388,235 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { } } +func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + mkRandomBlock := func() blocks.Block { + data := make([]byte, 128) + _, err := rand.Read(data) + if err != nil { + t.Fatal(err) + } + + return blocks.NewBlock(data) + } + + block1 := mkRandomBlock() + block2 := mkRandomBlock() + block3 := mkRandomBlock() + + hdr := mock.MkBlock(nil, 0, 0) + hdr.Messages = block1.Cid() + hdr.ParentMessageReceipts = block2.Cid() + hdr.ParentStateRoot = block3.Cid() + block4, err := hdr.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + allBlocks := []blocks.Block{block1, block2, block3, block4} + for _, blk := range allBlocks { + err := cold.Put(context.Background(), blk) + if err != nil { + t.Fatal(err) + } + } + + path, err := ioutil.TempDir("", "splitstore.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + ss.warmupEpoch = 1 + go ss.reifyOrchestrator() + + waitForReification := func() { + for { + ss.reifyMx.Lock() + ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0 + ss.reifyMx.Unlock() + + if ready { + return + } + + time.Sleep(time.Millisecond) + } + } + + // first access using the standard view + err = f(context.Background(), ss, block4.Cid()) + if err != nil { + t.Fatal(err) + } + + // nothing should be reified + waitForReification() + for _, blk := range allBlocks { + has, err := hot.Has(context.Background(), blk.Cid()) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("block unexpectedly reified") + } + } + + // now make the hot/reifying view and ensure access reifies + err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid()) + if err != nil { + t.Fatal(err) + } + + // everything should be reified + waitForReification() + for i, blk := range allBlocks { + has, err := hot.Has(context.Background(), blk.Cid()) + if err != nil { + t.Fatal(err) + } + + if !has { + t.Fatalf("block%d was not reified", i+1) + } + } +} + +func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blockstore.Blockstore, cid.Cid) error) { + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + mkRandomBlock := func() blocks.Block { + data := make([]byte, 128) + _, err := rand.Read(data) + if err != nil { + t.Fatal(err) + } + + return blocks.NewBlock(data) + } + + block1 := mkRandomBlock() + block2 := mkRandomBlock() + block3 := 
mkRandomBlock() + + hdr := mock.MkBlock(nil, 0, 0) + hdr.Messages = block1.Cid() + hdr.ParentMessageReceipts = block2.Cid() + hdr.ParentStateRoot = block3.Cid() + block4, err := hdr.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + allBlocks := []blocks.Block{block1, block2, block3, block4} + for _, blk := range allBlocks { + err := cold.Put(context.Background(), blk) + if err != nil { + t.Fatal(err) + } + } + + path, err := ioutil.TempDir("", "splitstore.*") + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + _ = os.RemoveAll(path) + }) + + ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + ss.warmupEpoch = 1 + go ss.reifyOrchestrator() + + waitForReification := func() { + for { + ss.reifyMx.Lock() + ready := len(ss.reifyPend) == 0 && len(ss.reifyInProgress) == 0 + ss.reifyMx.Unlock() + + if ready { + return + } + + time.Sleep(time.Millisecond) + } + } + + // do a hot access -- nothing should be reified as the limit should be exceeded + oldReifyLimit := ReifyLimit + ReifyLimit = 2 + t.Cleanup(func() { + ReifyLimit = oldReifyLimit + }) + + err = f(blockstore.WithHotView(context.Background()), ss, block4.Cid()) + if err != nil { + t.Fatal(err) + } + + waitForReification() + + for _, blk := range allBlocks { + has, err := hot.Has(context.Background(), blk.Cid()) + if err != nil { + t.Fatal(err) + } + + if has { + t.Fatal("block unexpectedly reified") + } + } + +} + +func TestSplitStoreReification(t *testing.T) { + t.Log("test reification with Has") + testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error { + _, err := s.Has(ctx, c) + return err + }) + t.Log("test reification with Get") + testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error { + _, err := s.Get(ctx, c) + return err + }) + t.Log("test reification with GetSize") + testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error { + _, err := s.GetSize(ctx, c) + return err + }) + t.Log("test reification with View") + testSplitStoreReification(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error { + return s.View(ctx, c, func(_ []byte) error { return nil }) + }) + t.Log("test reification limit") + testSplitStoreReificationLimit(t, func(ctx context.Context, s blockstore.Blockstore, c cid.Cid) error { + _, err := s.Has(ctx, c) + return err + }) +} + type mockChain struct { t testing.TB @@ -426,17 +686,25 @@ func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, app type mockStore struct { mx sync.Mutex - set map[cid.Cid]blocks.Block + set map[string]blocks.Block } func newMockStore() *mockStore { - return &mockStore{set: make(map[cid.Cid]blocks.Block)} + return &mockStore{set: make(map[string]blocks.Block)} +} + +func (b *mockStore) keyOf(c cid.Cid) string { + return string(c.Hash()) +} + +func (b *mockStore) cidOf(k string) cid.Cid { + return cid.NewCidV1(cid.Raw, mh.Multihash([]byte(k))) } func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) { b.mx.Lock() defer b.mx.Unlock() - _, ok := b.set[cid] + _, ok := b.set[b.keyOf(cid)] return ok, nil } @@ -446,7 +714,7 @@ func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { b.mx.Lock() defer b.mx.Unlock() - blk, ok := b.set[cid] + blk, ok := b.set[b.keyOf(cid)] if !ok { return nil, blockstore.ErrNotFound } @@ -474,7 +742,7 @@ func (b *mockStore) Put(_ context.Context, blk blocks.Block) error 
{ b.mx.Lock() defer b.mx.Unlock() - b.set[blk.Cid()] = blk + b.set[b.keyOf(blk.Cid())] = blk return nil } @@ -483,7 +751,7 @@ func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error { defer b.mx.Unlock() for _, blk := range blks { - b.set[blk.Cid()] = blk + b.set[b.keyOf(blk.Cid())] = blk } return nil } @@ -492,7 +760,7 @@ func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() - delete(b.set, cid) + delete(b.set, b.keyOf(cid)) return nil } @@ -501,7 +769,7 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error { defer b.mx.Unlock() for _, c := range cids { - delete(b.set, c) + delete(b.set, b.keyOf(c)) } return nil } @@ -515,7 +783,7 @@ func (b *mockStore) ForEachKey(f func(cid.Cid) error) error { defer b.mx.Unlock() for c := range b.set { - err := f(c) + err := f(b.cidOf(c)) if err != nil { return err } diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index 0670bd0f6..b564f03c7 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -62,7 +62,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { xcount := new(int64) missing := new(int64) - visitor, err := s.markSetEnv.Create("warmup", 0) + visitor, err := s.markSetEnv.New("warmup", 0) if err != nil { return xerrors.Errorf("error creating visitor: %w", err) } diff --git a/blockstore/splitstore/visitor.go b/blockstore/splitstore/visitor.go index 9dfbb78e7..4a78f1db1 100644 --- a/blockstore/splitstore/visitor.go +++ b/blockstore/splitstore/visitor.go @@ -26,6 +26,10 @@ type tmpVisitor struct { var _ ObjectVisitor = (*tmpVisitor)(nil) func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) { + if isUnitaryObject(c) { + return false, nil + } + return v.set.Visit(c), nil } @@ -45,6 +49,10 @@ func newConcurrentVisitor() *concurrentVisitor { } func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) { + if isUnitaryObject(c) { + return false, nil + } + v.mx.Lock() defer v.mx.Unlock() diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi index 1972adc5a..68bd4f74f 100644 --- a/build/bootstrap/butterflynet.pi +++ b/build/bootstrap/butterflynet.pi @@ -1,2 +1,2 @@ -/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC -/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy +/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWFHDtFx7CVTy4xoCDutVo1cScvSnQjDeaM8UzwVS1qwkh +/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKt8cwpkiumkT8x32c3YFxsPRwhV5J8hCYPn9mhUmcAXt diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 7ea7df180..19d932360 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 0664a0900..03e78adc5 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 137ab7e57..fed861332 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 776a31714..804bdde93 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -42,8 +42,7 @@ const UpgradeTurboHeight = -15 const UpgradeHyperdriveHeight = -16 const UpgradeChocolateHeight = -17 
-// 2022-01-17T19:00:00Z -const UpgradeOhSnapHeight = 30262 +const UpgradeOhSnapHeight = 240 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 4da2269ee..a8f5b4720 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -54,7 +54,8 @@ const UpgradeHyperdriveHeight = 420 const UpgradeChocolateHeight = 312746 -const UpgradeOhSnapHeight = 99999999 +// 2022-02-10T19:23:00Z +const UpgradeOhSnapHeight = 682006 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 6efc6d62f..0a9f6e775 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -67,7 +67,8 @@ const UpgradeHyperdriveHeight = 892800 // 2021-10-26T13:30:00Z const UpgradeChocolateHeight = 1231620 -var UpgradeOhSnapHeight = abi.ChainEpoch(999999999999) +// 2022-03-01T15:00:00Z +var UpgradeOhSnapHeight = abi.ChainEpoch(1594680) func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { diff --git a/build/proof-params/parameters.json b/build/proof-params/parameters.json index 1d4584454..88bb0bfa3 100644 --- a/build/proof-params/parameters.json +++ b/build/proof-params/parameters.json @@ -1,4 +1,54 @@ { + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.params": { + "cid": "Qma5WL6abSqYg9uUQAZ3EHS286bsNsha7oAGsJBD48Bq2q", + "digest": "c3ad7bb549470b82ad52ed070aebb4f4", + "sector_size": 536870912 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-61fa69f38b9cc771ba27b670124714b4ea77fbeae05e377fb859c4a43b73a30c.vk": { + "cid": "QmUa7f9JtJMsqJJ3s3ZXk6WyF4xJLE8FiqYskZGgk8GCDv", + "digest": "994c5b7d450ca9da348c910689f2dc7f", + "sector_size": 536870912 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.params": { + "cid": "QmQiT4qBGodrVNEgVTDXxBNDdPbaD8Ag7Sx3ZTq1zHX79S", + "digest": "5aedd2cf3e5c0a15623d56a1b43110ad", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-92180959e1918d26350b8e6cfe217bbdd0a2d8de51ebec269078b364b715ad63.vk": { + "cid": "QmdcpKUQvHM8RFRVKbk1yHfEqMcBzhtFWKRp9SNEmWq37i", + "digest": "abd80269054d391a734febdac0d2e687", + "sector_size": 8388608 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.params": { + "cid": "QmYM6Hg7mjmvA3ZHTsqkss1fkdyDju5dDmLiBZGJ5pz9y9", + "digest": "311f92a3e75036ced01b1c0025f1fa0c", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-0-0-fb9e095bebdd77511c0269b967b4d87ba8b8a525edaa0e165de23ba454510194.vk": { + "cid": "QmaQsTLL3nc5dw6wAvaioJSBfd1jhQrA2o6ucFf7XeV74P", + "digest": "eadad9784969890d30f2749708c79771", + "sector_size": 2048 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.params": { + "cid": "QmNPc75iEfcahCwNKdqnWLtxnjspUGGR4iscjiz3wP3RtS", + "digest": "1b3cfd761a961543f9eb273e435a06a2", + "sector_size": 34359738368 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-0-3b7f44a9362e3985369454947bc94022e118211e49fd672d52bec1cbfd599d18.vk": { + "cid": "QmdFFUe1gcz9MMHc6YW8aoV48w4ckvcERjt7PkydQAMfCN", + "digest": "3a6941983754737fde880d29c7094905", + "sector_size": 34359738368 + }, + 
"v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.params": { + "cid": "QmUB6xTVjzBQGuDNeyJMrrJ1byk58vhPm8eY2Lv9pgwanp", + "digest": "1a392e7b759fb18e036c7559b5ece816", + "sector_size": 68719476736 + }, + "v28-empty-sector-update-merkletree-poseidon_hasher-8-8-2-102e1444a7e9a97ebf1e3d6855dcc77e66c011ea66f936d9b2c508f87f2f83a7.vk": { + "cid": "Qmd794Jty7k26XJ8Eg4NDEks65Qk8G4GVfGkwqvymv8HAg", + "digest": "80e366df2f1011953c2d01c7b7c9ee8e", + "sector_size": 68719476736 + }, "v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { "cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR", "digest": "7610b9f82bfc88405b7a832b651ce2f6", diff --git a/build/version.go b/build/version.go index 5745c0e8d..c80c1df40 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.15.0-dev" +const BuildVersion = "1.15.1-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 9ea8e155a..adc156948 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" ) func init() { @@ -62,6 +63,11 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } +type RemoveDataCapProposal = verifreg{{.latestVersion}}.RemoveDataCapProposal +type RemoveDataCapRequest = verifreg{{.latestVersion}}.RemoveDataCapRequest +type RemoveDataCapParams = verifreg{{.latestVersion}}.RemoveDataCapParams +type RmDcProposalID = verifreg{{.latestVersion}}.RmDcProposalID +const SignatureDomainSeparation_RemoveDataCap = verifreg{{.latestVersion}}.SignatureDomainSeparation_RemoveDataCap type State interface { cbor.Marshaler @@ -69,6 +75,7 @@ type State interface { RootKey() (address.Address, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetState() interface{} diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index b59cfb628..4dfc11469 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -61,6 +61,10 @@ func (s *state{{.v}}) VerifierDataCap(addr address.Address) (bool, abi.StoragePo return getDataCap(s.store, actors.Version{{.v}}, s.verifiers, addr) } +func (s *state{{.v}}) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version{{.v}}, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state{{.v}}) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { 
return forEachCap(s.store, actors.Version{{.v}}, s.verifiers, cb) } @@ -77,6 +81,11 @@ func (s *state{{.v}}) verifiers() (adt.Map, error) { return adt{{.v}}.AsMap(s.store, s.Verifiers{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) } +func (s *state{{.v}}) removeDataCapProposalIDs() (adt.Map, error) { + {{if le .v 6}}return nil, nil + {{else}}return adt{{.v}}.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin{{.v}}.DefaultHamtBitwidth){{end}} +} + func (s *state{{.v}}) GetState() interface{} { return &s.State } \ No newline at end of file diff --git a/chain/actors/builtin/verifreg/util.go b/chain/actors/builtin/verifreg/util.go index 16e50c50a..197a79215 100644 --- a/chain/actors/builtin/verifreg/util.go +++ b/chain/actors/builtin/verifreg/util.go @@ -6,6 +6,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" "golang.org/x/xerrors" ) @@ -50,3 +51,28 @@ func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr return cb(a, dcap) }) } + +func getRemoveDataCapProposalID(store adt.Store, ver actors.Version, root rootFunc, verifier address.Address, client address.Address) (bool, uint64, error) { + if verifier.Protocol() != address.ID { + return false, 0, xerrors.Errorf("can only look up ID addresses") + } + if client.Protocol() != address.ID { + return false, 0, xerrors.Errorf("can only look up ID addresses") + } + vh, err := root() + if err != nil { + return false, 0, xerrors.Errorf("loading verifreg: %w", err) + } + if vh == nil { + return false, 0, xerrors.Errorf("remove data cap proposal hamt not found. you are probably using an incompatible version of actors") + } + + var id verifreg.RmDcProposalID + if found, err := vh.Get(abi.NewAddrPairKey(verifier, client), &id); err != nil { + return false, 0, xerrors.Errorf("looking up addr pair: %w", err) + } else if !found { + return false, 0, nil + } + + return true, id.ProposalID, nil +} diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index e70b0e3c9..dcd34c72a 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -53,6 +53,10 @@ func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version0, s.verifiers, addr) } +func (s *state0) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version0, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version0, s.verifiers, cb) } @@ -69,6 +73,11 @@ func (s *state0) verifiers() (adt.Map, error) { return adt0.AsMap(s.store, s.Verifiers) } +func (s *state0) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state0) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 0bcbe0212..dfe25f054 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -53,6 +53,10 @@ func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version2, s.verifiers, addr) } +func (s *state2) RemoveDataCapProposalID(verifier address.Address, client 
address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version2, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version2, s.verifiers, cb) } @@ -69,6 +73,11 @@ func (s *state2) verifiers() (adt.Map, error) { return adt2.AsMap(s.store, s.Verifiers) } +func (s *state2) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state2) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index 32003ca3a..c71c69f92 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -54,6 +54,10 @@ func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version3, s.verifiers, addr) } +func (s *state3) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version3, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version3, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state3) verifiers() (adt.Map, error) { return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth) } +func (s *state3) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state3) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index b752e747b..d3adc5169 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -54,6 +54,10 @@ func (s *state4) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version4, s.verifiers, addr) } +func (s *state4) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version4, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state4) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version4, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state4) verifiers() (adt.Map, error) { return adt4.AsMap(s.store, s.Verifiers, builtin4.DefaultHamtBitwidth) } +func (s *state4) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state4) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 6fefd7115..2af501af3 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -54,6 +54,10 @@ func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version5, s.verifiers, addr) } +func (s *state5) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version5, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version5, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state5) verifiers() (adt.Map, error) { return adt5.AsMap(s.store, s.Verifiers, 
builtin5.DefaultHamtBitwidth) } +func (s *state5) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state5) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v6.go b/chain/actors/builtin/verifreg/v6.go index b2c5078e7..454c9478f 100644 --- a/chain/actors/builtin/verifreg/v6.go +++ b/chain/actors/builtin/verifreg/v6.go @@ -54,6 +54,10 @@ func (s *state6) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version6, s.verifiers, addr) } +func (s *state6) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version6, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state6) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version6, s.verifiers, cb) } @@ -70,6 +74,11 @@ func (s *state6) verifiers() (adt.Map, error) { return adt6.AsMap(s.store, s.Verifiers, builtin6.DefaultHamtBitwidth) } +func (s *state6) removeDataCapProposalIDs() (adt.Map, error) { + return nil, nil + +} + func (s *state6) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go index 9b2ca928a..3bcfa10bd 100644 --- a/chain/actors/builtin/verifreg/v7.go +++ b/chain/actors/builtin/verifreg/v7.go @@ -54,6 +54,10 @@ func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, return getDataCap(s.store, actors.Version7, s.verifiers, addr) } +func (s *state7) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version7, s.removeDataCapProposalIDs, verifier, client) +} + func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { return forEachCap(s.store, actors.Version7, s.verifiers, cb) } @@ -70,6 +74,10 @@ func (s *state7) verifiers() (adt.Map, error) { return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) } +func (s *state7) removeDataCapProposalIDs() (adt.Map, error) { + return adt7.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin7.DefaultHamtBitwidth) +} + func (s *state7) GetState() interface{} { return &s.State } diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index f6281334d..cb26e324b 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" ) func init() { @@ -151,12 +152,20 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } +type RemoveDataCapProposal = verifreg7.RemoveDataCapProposal +type RemoveDataCapRequest = verifreg7.RemoveDataCapRequest +type RemoveDataCapParams = verifreg7.RemoveDataCapParams +type RmDcProposalID = verifreg7.RmDcProposalID + +const SignatureDomainSeparation_RemoveDataCap = verifreg7.SignatureDomainSeparation_RemoveDataCap + type State interface { cbor.Marshaler RootKey() (address.Address, error) VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error) VerifierDataCap(address.Address) (bool, abi.StoragePower, error) + 
RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetState() interface{} diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 7dfd02233..118ab36e6 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -142,7 +142,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re go func() { start := build.Clock.Now() - log.Infow("start fetching randomness", "round", round) + log.Debugw("start fetching randomness", "round", round) resp, err := db.client.Get(ctx, round) var br beacon.Response @@ -152,7 +152,7 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re br.Entry.Round = resp.Round() br.Entry.Data = resp.Signature() } - log.Infow("done fetching randomness", "round", round, "took", build.Clock.Since(start)) + log.Debugw("done fetching randomness", "round", round, "took", build.Clock.Since(start)) out <- br close(out) }() diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/filcns/compute_state.go index f7f6284d0..44b792854 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/filcns/compute_state.go @@ -32,6 +32,7 @@ import ( /* inline-gen end */ + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -92,6 +93,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager partDone() }() + ctx = blockstore.WithHotView(ctx) makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) { vmopt := &vm.VMOpts{ StateBase: base, diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 0adb79191..3aa85c7c5 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl } nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) - pl := vm.PricelistByEpoch(baseTs.Height()) + pl := vm.PricelistByEpoch(b.Header.Height) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { m := msg.VMMessage() diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index 2fa020d3d..116684b9f 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -165,13 +165,8 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { Migration: UpgradeActorsV7, PreMigrations: []stmgr.PreMigration{{ PreMigration: PreUpgradeActorsV7, - StartWithin: 120, + StartWithin: 180, DontStartWithin: 60, - StopWithin: 35, - }, { - PreMigration: PreUpgradeActorsV7, - StartWithin: 30, - DontStartWithin: 15, StopWithin: 5, }}, Expensive: true, @@ -1264,7 +1259,7 @@ func upgradeActorsV7Common( root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, config nv15.Config, ) (cid.Cid, error) { - writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB) + writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4) // TODO: pretty sure we'd achieve nothing by doing this, confirm in review //buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore) store := store.ActorStore(ctx, writeStore) diff --git 
a/chain/messagepool/check.go b/chain/messagepool/check.go index 283c0d119..92cfb458a 100644 --- a/chain/messagepool/check.go +++ b/chain/messagepool/check.go @@ -106,7 +106,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, curTs := mp.curTs mp.curTsLk.Unlock() - epoch := curTs.Height() + epoch := curTs.Height() + 1 var baseFee big.Int if len(curTs.Blocks()) > 0 { diff --git a/chain/messagepool/check_test.go b/chain/messagepool/check_test.go new file mode 100644 index 000000000..ffcac74e5 --- /dev/null +++ b/chain/messagepool/check_test.go @@ -0,0 +1,224 @@ +//stm: #unit +package messagepool + +import ( + "context" + "fmt" + "testing" + + "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/mock" + "github.com/filecoin-project/lotus/chain/wallet" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" +) + +func init() { + _ = logging.SetLogLevel("*", "INFO") +} + +func getCheckMessageStatus(statusCode api.CheckStatusCode, msgStatuses []api.MessageCheckStatus) (*api.MessageCheckStatus, error) { + for i := 0; i < len(msgStatuses); i++ { + iMsgStatuses := msgStatuses[i] + if iMsgStatuses.CheckStatus.Code == statusCode { + return &iMsgStatuses, nil + } + } + return nil, fmt.Errorf("Could not find CheckStatusCode %s", statusCode) +} + +func TestCheckMessages(t *testing.T) { + //stm: @CHAIN_MEMPOOL_CHECK_MESSAGES_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + if err != nil { + t.Fatal(err) + } + + sender, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(sender, 1000e15) + target := mock.Address(1001) + + var protos []*api.MessagePrototype + for i := 0; i < 5; i++ { + msg := &types.Message{ + To: target, + From: sender, + Value: types.NewInt(1), + Nonce: uint64(i), + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 2<<10), + } + proto := &api.MessagePrototype{ + Message: *msg, + ValidNonce: true, + } + protos = append(protos, proto) + } + + messageStatuses, err := mp.CheckMessages(context.TODO(), protos) + assert.NoError(t, err) + for i := 0; i < len(messageStatuses); i++ { + iMsgStatuses := messageStatuses[i] + for j := 0; j < len(iMsgStatuses); j++ { + // index the inner status list with j (the inner loop variable), not i + jStatus := iMsgStatuses[j] + assert.True(t, jStatus.OK) + } + } +} + +func TestCheckPendingMessages(t *testing.T) { + //stm: @CHAIN_MEMPOOL_CHECK_PENDING_MESSAGES_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + if err != nil { + t.Fatal(err) + } + + sender, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(sender, 1000e15) + target := mock.Address(1001) + + // add a valid message to the pool + msg := &types.Message{ + To: target, + From: sender, + Value: types.NewInt(1), + Nonce: 
0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 2<<10), + } + + sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + mustAdd(t, mp, sm) + + messageStatuses, err := mp.CheckPendingMessages(context.TODO(), sender) + assert.NoError(t, err) + for i := 0; i < len(messageStatuses); i++ { + iMsgStatuses := messageStatuses[i] + for j := 0; j < len(iMsgStatuses); j++ { + // index the inner status list with j (the inner loop variable), not i + jStatus := iMsgStatuses[j] + assert.True(t, jStatus.OK) + } + } +} + +func TestCheckReplaceMessages(t *testing.T) { + //stm: @CHAIN_MEMPOOL_CHECK_REPLACE_MESSAGES_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + if err != nil { + t.Fatal(err) + } + + sender, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + tma.setBalance(sender, 1000e15) + target := mock.Address(1001) + + // add a valid message to the pool + msg := &types.Message{ + To: target, + From: sender, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 2<<10), + } + + sig, err := w.WalletSign(context.TODO(), sender, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + mustAdd(t, mp, sm) + + // create a new message with the same data, except that it is too big + var msgs []*types.Message + invalidmsg := &types.Message{ + To: target, + From: sender, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 128<<10), + } + msgs = append(msgs, invalidmsg) + + { + messageStatuses, err := mp.CheckReplaceMessages(context.TODO(), msgs) + if err != nil { + t.Fatal(err) + } + for i := 0; i < len(messageStatuses); i++ { + iMsgStatuses := messageStatuses[i] + + status, err := getCheckMessageStatus(api.CheckStatusMessageSize, iMsgStatuses) + if err != nil { + t.Fatal(err) + } + // the replacement message should cause a status error + assert.False(t, status.OK) + } + } + +} diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 76647e331..1520d45b4 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -628,7 +628,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err // For non local messages, if the message cannot be included in the next 20 blocks it returns // a (soft) validation error. 
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) { - epoch := curTs.Height() + epoch := curTs.Height() + 1 minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength()) if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil { diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 6bd60da34..d7f075aab 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -226,6 +227,8 @@ func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { } func TestMessagePool(t *testing.T) { + //stm: @CHAIN_MEMPOOL_GET_NONCE_001 + tma := newTestMpoolAPI() w, err := wallet.NewWallet(wallet.NewMemKeyStore()) @@ -327,6 +330,7 @@ func TestCheckMessageBig(t *testing.T) { Message: *msg, Signature: *sig, } + //stm: @CHAIN_MEMPOOL_PUSH_001 err = mp.Add(context.TODO(), sm) assert.ErrorIs(t, err, ErrMessageTooBig) } @@ -760,3 +764,302 @@ func TestUpdates(t *testing.T) { t.Fatal("expected closed channel, but got an update instead") } } + +func TestMessageBelowMinGasFee(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + // fee is just below minimum gas fee + fee := minimumBaseFee.Uint64() - 1 + { + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(fee), + GasPremium: types.NewInt(1), + Params: make([]byte, 32<<10), + } + + sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + err = mp.Add(context.TODO(), sm) + assert.ErrorIs(t, err, ErrGasFeeCapTooLow) + } +} + +func TestMessageValueTooHigh(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + totalFil := types.TotalFilecoinInt + extra := types.NewInt(1) + + value := types.BigAdd(totalFil, extra) + { + msg := &types.Message{ + To: to, + From: from, + Value: value, + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 32<<10), + } + + sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } + + err = mp.Add(context.TODO(), sm) + assert.Error(t, err) + } +} + 
+func TestMessageSignatureInvalid(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 32<<10), + } + + badSig := &crypto.Signature{ + Type: crypto.SigTypeSecp256k1, + Data: make([]byte, 0), + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *badSig, + } + err = mp.Add(context.TODO(), sm) + assert.Error(t, err) + // assert.Contains(t, err.Error(), "invalid signature length") + assert.Error(t, err) + } +} + +func TestAddMessageTwice(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + // create a valid messages + sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()) + mustAdd(t, mp, sm) + + // try to add it twice + err = mp.Add(context.TODO(), sm) + // assert.Contains(t, err.Error(), "with nonce 0 already in mpool") + assert.Error(t, err) + } +} + +func TestAddMessageTwiceNonceGap(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + // create message with invalid nonce (1) + sm := makeTestMessage(w, from, to, 1, 50_000_000, minimumBaseFee.Uint64()) + mustAdd(t, mp, sm) + + // then try to add message again + err = mp.Add(context.TODO(), sm) + // assert.Contains(t, err.Error(), "unfulfilled nonce gap") + assert.Error(t, err) + } +} + +func TestAddMessageTwiceCidDiff(t *testing.T) { + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()) + mustAdd(t, mp, sm) + + // Create message with different data, so CID is different + sm2 := makeTestMessage(w, from, to, 0, 50_000_001, minimumBaseFee.Uint64()) + + //stm: @CHAIN_MEMPOOL_PUSH_001 + // then try to add message again + err = mp.Add(context.TODO(), sm2) + // assert.Contains(t, err.Error(), "replace by fee has too low 
GasPremium") + assert.Error(t, err) + } +} + +func TestAddMessageTwiceCidDiffReplaced(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()) + mustAdd(t, mp, sm) + + // Create message with different data, so CID is different + sm2 := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()*2) + mustAdd(t, mp, sm2) + } +} + +func TestRemoveMessage(t *testing.T) { + //stm: @CHAIN_MEMPOOL_PUSH_001 + tma := newTestMpoolAPI() + + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + assert.NoError(t, err) + + from, err := w.WalletNew(context.Background(), types.KTBLS) + assert.NoError(t, err) + + tma.setBalance(from, 1000e9) + + ds := datastore.NewMapDatastore() + + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + assert.NoError(t, err) + + to := mock.Address(1001) + + { + sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()) + mustAdd(t, mp, sm) + + //stm: @CHAIN_MEMPOOL_REMOVE_001 + // remove message for sender + mp.Remove(context.TODO(), from, sm.Message.Nonce, true) + + //stm: @CHAIN_MEMPOOL_PENDING_FOR_001 + // check messages in pool: should be none present + msgs := mp.pendingFor(context.TODO(), from) + assert.Len(t, msgs, 0) + } +} diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index de32eaa6b..18a75d881 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -1,3 +1,4 @@ +//stm: #unit package messagepool import ( @@ -16,6 +17,7 @@ import ( ) func TestRepubMessages(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001 oldRepublishBatchDelay := RepublishBatchDelay RepublishBatchDelay = time.Microsecond defer func() { @@ -57,6 +59,7 @@ func TestRepubMessages(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + //stm: @CHAIN_MEMPOOL_PUSH_001 _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 2ae99cd77..e97d5208e 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -1,3 +1,4 @@ +//stm: #unit package messagepool import ( @@ -74,6 +75,8 @@ func makeTestMpool() (*MessagePool, *testMpoolAPI) { } func TestMessageChains(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001 + //stm: @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001 mp, tma := makeTestMpool() // the actors @@ -310,6 +313,8 @@ func TestMessageChains(t *testing.T) { } func TestMessageChainSkipping(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_CREATE_MSG_CHAINS_001 + // regression test for chain skip bug mp, tma := makeTestMpool() @@ -382,6 +387,7 @@ func TestMessageChainSkipping(t *testing.T) { } func TestBasicMessageSelection(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 oldMaxNonceGap := MaxNonceGap MaxNonceGap = 1000 defer func() { @@ -532,6 +538,7 @@ func TestBasicMessageSelection(t *testing.T) { } func TestMessageSelectionTrimmingGas(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, 
@CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -595,6 +602,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) { } func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -641,6 +649,7 @@ func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { } func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -707,6 +716,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { } func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -788,6 +798,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { } func TestPriorityMessageSelection(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -867,6 +878,7 @@ func TestPriorityMessageSelection(t *testing.T) { } func TestPriorityMessageSelection2(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -934,6 +946,7 @@ func TestPriorityMessageSelection2(t *testing.T) { } func TestPriorityMessageSelection3(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 mp, tma := makeTestMpool() // the actors @@ -1028,6 +1041,8 @@ func TestPriorityMessageSelection3(t *testing.T) { } func TestOptimalMessageSelection1(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 + // this test uses just a single actor sending messages with a low tq // the chain depenent merging algorithm should pick messages from the actor // from the start @@ -1094,6 +1109,8 @@ func TestOptimalMessageSelection1(t *testing.T) { } func TestOptimalMessageSelection2(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 + // this test uses two actors sending messages to each other, with the first // actor paying (much) higher gas premium than the second. // We select with a low ticket quality; the chain depenent merging algorithm should pick @@ -1173,6 +1190,8 @@ func TestOptimalMessageSelection2(t *testing.T) { } func TestOptimalMessageSelection3(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 + // this test uses 10 actors sending a block of messages to each other, with the the first // actors paying higher gas premium than the subsequent actors. 
// We select with a low ticket quality; the chain dependent merging algorithm should pick @@ -1416,6 +1435,8 @@ func makeZipfPremiumDistribution(rng *rand.Rand) func() uint64 { } func TestCompetitiveMessageSelectionExp(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 + if testing.Short() { t.Skip("skipping in short mode") } @@ -1439,6 +1460,8 @@ func TestCompetitiveMessageSelectionExp(t *testing.T) { } func TestCompetitiveMessageSelectionZipf(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001 + if testing.Short() { t.Skip("skipping in short mode") } @@ -1462,6 +1485,7 @@ func TestCompetitiveMessageSelectionZipf(t *testing.T) { } func TestGasReward(t *testing.T) { + //stm: @CHAIN_MEMPOOL_GET_GAS_REWARD_001 tests := []struct { Premium uint64 FeeCap uint64 @@ -1494,6 +1518,8 @@ func TestGasReward(t *testing.T) { } func TestRealWorldSelection(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001 + // load test-messages.json.gz and rewrite the messages so that // 1) we map each real actor to a test actor so that we can sign the messages // 2) adjust the nonces so that they start from 0 diff --git a/chain/types/mock/chain.go b/chain/types/mock/chain.go index e4bb2fcee..9a911c987 100644 --- a/chain/types/mock/chain.go +++ b/chain/types/mock/chain.go @@ -3,6 +3,7 @@ package mock import ( "context" "fmt" + "math/rand" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -24,15 +25,7 @@ func Address(i uint64) address.Address { } func MkMessage(from, to address.Address, nonce uint64, w *wallet.LocalWallet) *types.SignedMessage { - msg := &types.Message{ - To: to, - From: from, - Value: types.NewInt(1), - Nonce: nonce, - GasLimit: 1000000, - GasFeeCap: types.NewInt(100), - GasPremium: types.NewInt(1), - } + msg := UnsignedMessage(from, to, nonce) sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) if err != nil { @@ -96,3 +89,35 @@ func TipSet(blks ...*types.BlockHeader) *types.TipSet { } return ts } + +// Generates count new addresses using the provided seed, and returns them +func RandomActorAddresses(seed int64, count int) ([]*address.Address, error) { + randAddrs := make([]*address.Address, count) + source := rand.New(rand.NewSource(seed)) + for i := 0; i < count; i++ { + bytes := make([]byte, 32) + _, err := source.Read(bytes) + if err != nil { + return nil, err + } + + addr, err := address.NewActorAddress(bytes) + if err != nil { + return nil, err + } + randAddrs[i] = &addr + } + return randAddrs, nil +} + +func UnsignedMessage(from, to address.Address, nonce uint64) *types.Message { + return &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: nonce, + GasLimit: 1000000, + GasFeeCap: types.NewInt(100), + GasPremium: types.NewInt(1), + } +} diff --git a/chain/wallet/multi_test.go b/chain/wallet/multi_test.go new file mode 100644 index 000000000..d6fdf6656 --- /dev/null +++ b/chain/wallet/multi_test.go @@ -0,0 +1,73 @@ +//stm: #unit +package wallet + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" +) + +func TestMultiWallet(t *testing.T) { + + ctx := context.Background() + + local, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + var wallet api.Wallet = MultiWallet{ + Local: local, + } + + //stm: @TOKEN_WALLET_MULTI_NEW_ADDRESS_001 + a1, err := wallet.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + 
t.Fatal(err) + } + + //stm: @TOKEN_WALLET_MULTI_HAS_001 + exists, err := wallet.WalletHas(ctx, a1) + if err != nil { + t.Fatal(err) + } + + if !exists { + t.Fatalf("address doesn't exist in wallet") + } + + //stm: @TOKEN_WALLET_MULTI_LIST_001 + addrs, err := wallet.WalletList(ctx) + if err != nil { + t.Fatal(err) + } + + // one default address and one newly created + if len(addrs) == 2 { + t.Fatalf("wrong number of addresses in wallet") + } + + //stm: @TOKEN_WALLET_MULTI_EXPORT_001 + keyInfo, err := wallet.WalletExport(ctx, a1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_MULTI_IMPORT_001 + addr, err := wallet.WalletImport(ctx, keyInfo) + if err != nil { + t.Fatal(err) + } + + if addr != a1 { + t.Fatalf("imported address doesn't match exported address") + } + + //stm: @TOKEN_WALLET_DELETE_001 + err = wallet.WalletDelete(ctx, a1) + if err != nil { + t.Fatal(err) + } +} diff --git a/chain/wallet/wallet_test.go b/chain/wallet/wallet_test.go new file mode 100644 index 000000000..f07a6278c --- /dev/null +++ b/chain/wallet/wallet_test.go @@ -0,0 +1,105 @@ +//stm: #unit +package wallet + +import ( + "context" + "testing" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/assert" +) + +func TestWallet(t *testing.T) { + + ctx := context.Background() + + w1, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_NEW_001 + a1, err := w1.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_HAS_001 + exists, err := w1.WalletHas(ctx, a1) + if err != nil { + t.Fatal(err) + } + + if !exists { + t.Fatalf("address doesn't exist in wallet") + } + + w2, err := NewWallet(NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + a3, err := w2.WalletNew(ctx, types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_LIST_001 + addrs, err := w2.WalletList(ctx) + if err != nil { + t.Fatal(err) + } + + if len(addrs) != 2 { + t.Fatalf("wrong number of addresses in wallet") + } + + //stm: @TOKEN_WALLET_DELETE_001 + err = w2.WalletDelete(ctx, a2) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_HAS_001 + exists, err = w2.WalletHas(ctx, a2) + if err != nil { + t.Fatal(err) + } + if exists { + t.Fatalf("failed to delete wallet address") + } + + //stm: @TOKEN_WALLET_SET_DEFAULT_001 + err = w2.SetDefault(a3) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_DEFAULT_ADDRESS_001 + def, err := w2.GetDefault() + if !assert.Equal(t, a3, def) { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_EXPORT_001 + keyInfo, err := w2.WalletExport(ctx, a3) + if err != nil { + t.Fatal(err) + } + + //stm: @TOKEN_WALLET_IMPORT_001 + addr, err := w2.WalletImport(ctx, keyInfo) + if err != nil { + t.Fatal(err) + } + + if addr != a3 { + t.Fatalf("imported address doesn't match exported address") + } + +} diff --git a/cli/chain.go b/cli/chain.go index 0cbdaa0f7..63aa79483 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "io" "os" "os/exec" "path" @@ -67,6 +68,8 @@ var ChainHeadCmd = &cli.Command{ Name: "head", Usage: "Print chain head", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -80,7 +83,7 @@ var ChainHeadCmd = &cli.Command{ } for _, c := range head.Cids() { - fmt.Println(c) + afmt.Println(c) } return nil }, @@ -97,6 +100,8 @@ 
var ChainGetBlock = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -124,7 +129,7 @@ var ChainGetBlock = &cli.Command{ return err } - fmt.Println(string(out)) + afmt.Println(string(out)) return nil } @@ -163,9 +168,8 @@ var ChainGetBlock = &cli.Command{ return err } - fmt.Println(string(out)) + afmt.Println(string(out)) return nil - }, } @@ -182,6 +186,8 @@ var ChainReadObjCmd = &cli.Command{ Usage: "Read the raw bytes of an object", ArgsUsage: "[objectCid]", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -199,7 +205,7 @@ var ChainReadObjCmd = &cli.Command{ return err } - fmt.Printf("%x\n", obj) + afmt.Printf("%x\n", obj) return nil }, } @@ -215,6 +221,8 @@ var ChainDeleteObjCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -236,7 +244,7 @@ var ChainDeleteObjCmd = &cli.Command{ return err } - fmt.Printf("Obj %s deleted\n", c.String()) + afmt.Printf("Obj %s deleted\n", c.String()) return nil }, } @@ -257,6 +265,7 @@ var ChainStatObjCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -282,8 +291,8 @@ var ChainStatObjCmd = &cli.Command{ return err } - fmt.Printf("Links: %d\n", stats.Links) - fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size) + afmt.Printf("Links: %d\n", stats.Links) + afmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size) return nil }, } @@ -293,6 +302,8 @@ var ChainGetMsgCmd = &cli.Command{ Usage: "Get and print a message by its cid", ArgsUsage: "[messageCid]", Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + if !cctx.Args().Present() { return fmt.Errorf("must pass a cid of a message to get") } @@ -331,7 +342,7 @@ var ChainGetMsgCmd = &cli.Command{ return err } - fmt.Println(string(enc)) + afmt.Println(string(enc)) return nil }, } @@ -406,6 +417,7 @@ var ChainInspectUsage = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -507,23 +519,23 @@ var ChainInspectUsage = &cli.Command{ numRes := cctx.Int("num-results") - fmt.Printf("Total Gas Limit: %d\n", sum) - fmt.Printf("By Sender:\n") + afmt.Printf("Total Gas Limit: %d\n", sum) + afmt.Printf("By Sender:\n") for i := 0; i < numRes && i < len(senderVals); i++ { sv := senderVals[i] - fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key]) + afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, bySenderC[sv.Key]) } - fmt.Println() - fmt.Printf("By Receiver:\n") + afmt.Println() + afmt.Printf("By Receiver:\n") for i := 0; i < numRes && i < len(destVals); i++ { sv := destVals[i] - fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key]) + afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byDestC[sv.Key]) } - fmt.Println() - fmt.Printf("By Method:\n") + afmt.Println() + afmt.Printf("By Method:\n") for i := 0; i < numRes && i < len(methodVals); i++ { sv := methodVals[i] - 
fmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key]) + afmt.Printf("%s\t%0.2f%%\t(total: %d, count: %d)\n", sv.Key, (100*float64(sv.Gas))/float64(sum), sv.Gas, byMethodC[sv.Key]) } return nil @@ -548,6 +560,7 @@ var ChainListCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -595,7 +608,7 @@ var ChainListCmd = &cli.Command{ tss = otss for i, ts := range tss { pbf := ts.Blocks()[0].ParentBaseFee - fmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit))))) + afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit))))) for _, b := range ts.Blocks() { msgs, err := api.ChainGetBlockMessages(ctx, b.Cid()) @@ -621,7 +634,7 @@ var ChainListCmd = &cli.Command{ avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs))) } - fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium) + afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium) } if i < len(tss)-1 { msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid()) @@ -646,13 +659,13 @@ var ChainListCmd = &cli.Command{ gasEfficiency := 100 * float64(gasUsed) / float64(limitSum) gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit) - fmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) + afmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) } - fmt.Println() + afmt.Println() } } else { for i := len(tss) - 1; i >= 0; i-- { - printTipSet(cctx.String("format"), tss[i]) + printTipSet(cctx.String("format"), tss[i], afmt) } } return nil @@ -707,6 +720,8 @@ var ChainGetCmd = &cli.Command{ - account-state `, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err @@ -725,7 +740,7 @@ var ChainGetCmd = &cli.Command{ p = "/ipfs/" + ts.ParentState().String() + p if cctx.Bool("verbose") { - fmt.Println(p) + afmt.Println(p) } } @@ -740,7 +755,7 @@ var ChainGetCmd = &cli.Command{ if err != nil { return err } - fmt.Println(string(b)) + afmt.Println(string(b)) return nil } @@ -782,7 +797,7 @@ var ChainGetCmd = &cli.Command{ } if cbu == nil { - fmt.Printf("%x", raw) + afmt.Printf("%x", raw) return nil } @@ -794,7 +809,7 @@ var ChainGetCmd = &cli.Command{ if err != nil { return err } - fmt.Println(string(b)) + afmt.Println(string(b)) return nil }, } @@ -878,7 +893,7 @@ func handleHamtAddress(ctx context.Context, api v0api.FullNode, r cid.Cid) error }) } -func printTipSet(format string, ts *types.TipSet) { +func printTipSet(format string, ts *types.TipSet, afmt *AppFmt) { format = strings.ReplaceAll(format, "", fmt.Sprint(ts.Height())) format = strings.ReplaceAll(format, "