// lotus/chain/consensus/filcns/upgrades.go

package filcns

import (
"bytes"
"context"
_ "embed"
"fmt"
"os"
"runtime"
"strconv"
"time"
"github.com/docker/go-units"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration"
init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration"
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system"
init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration"
system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
nv22 "github.com/filecoin-project/go-state-types/builtin/v13/migration"
nv23 "github.com/filecoin-project/go-state-types/builtin/v14/migration"
nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/migration"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/rt"
gstStore "github.com/filecoin-project/go-state-types/store"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
"github.com/filecoin-project/specs-actors/actors/migration/nv3"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
"github.com/filecoin-project/specs-actors/v4/actors/migration/nv12"
"github.com/filecoin-project/specs-actors/v5/actors/migration/nv13"
"github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/filecoin-project/specs-actors/v8/actors/migration/nv16"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/actors/builtin/system"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/node/bundle"
)
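
// fvmLiftoffBanner holds the contents of FVMLiftoff.txt, embedded at build time.
//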
//go:embed FVMLiftoff.txt
var fvmLiftoffBanner string
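
// MigrationMaxWorkerCount is the maximum number of workers used by state
// migrations. It defaults to the number of CPUs and can be overridden via the
// LOTUS_MIGRATION_MAX_WORKER_COUNT environment variable (see init below).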
var (
MigrationMaxWorkerCount int
EnvMigrationMaxWorkerCount = "LOTUS_MIGRATION_MAX_WORKER_COUNT"
)
func init() {
// default the migration worker count to the number of CPUs
MigrationMaxWorkerCount = runtime.NumCPU()
// check if an alternative value was requested via the environment
if mwcs := os.Getenv(EnvMigrationMaxWorkerCount); mwcs != "" {
mwc, err := strconv.ParseInt(mwcs, 10, 32)
if err != nil {
log.Warnf("invalid value for %s (%s) defaulting to %d: %s", EnvMigrationMaxWorkerCount, mwcs, MigrationMaxWorkerCount, err)
return
}
// use value from environment
log.Infof("migration worker count set from %s (%d)", EnvMigrationMaxWorkerCount, mwc)
MigrationMaxWorkerCount = int(mwc)
return
}
log.Infof("migration worker count: %d", MigrationMaxWorkerCount)
}
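// DefaultUpgradeSchedule returns the statically defined upgrade schedule for
// this consensus implementation: one entry per network upgrade, carrying its
// activation height, target network version, migration function and any
// pre-migrations. Entries whose height is negative are treated as disabled
// and skipped.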
func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
var us stmgr.UpgradeSchedule
updates := []stmgr.Upgrade{{
Height: build.UpgradeBreezeHeight,
Network: network.Version1,
Migration: UpgradeFaucetBurnRecovery,
}, {
Height: build.UpgradeSmokeHeight,
Network: network.Version2,
Migration: nil,
}, {
Height: build.UpgradeIgnitionHeight,
Network: network.Version3,
Migration: UpgradeIgnition,
}, {
Height: build.UpgradeRefuelHeight,
Network: network.Version3,
Migration: UpgradeRefuel,
}, {
Height: build.UpgradeAssemblyHeight,
Network: network.Version4,
Expensive: true,
Migration: UpgradeActorsV2,
}, {
Height: build.UpgradeTapeHeight,
Network: network.Version5,
Migration: nil,
}, {
Height: build.UpgradeLiftoffHeight,
Network: network.Version5,
Migration: UpgradeLiftoff,
}, {
Height: build.UpgradeKumquatHeight,
Network: network.Version6,
Migration: nil,
}, {
Height: build.UpgradeCalicoHeight,
Network: network.Version7,
Migration: UpgradeCalico,
}, {
Height: build.UpgradePersianHeight,
Network: network.Version8,
Migration: nil,
}, {
Height: build.UpgradeOrangeHeight,
Network: network.Version9,
Migration: nil,
}, {
Height: build.UpgradeTrustHeight,
Network: network.Version10,
Migration: UpgradeActorsV3,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV3,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV3,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeNorwegianHeight,
Network: network.Version11,
Migration: nil,
}, {
Height: build.UpgradeTurboHeight,
Network: network.Version12,
Migration: UpgradeActorsV4,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV4,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV4,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeHyperdriveHeight,
Network: network.Version13,
Migration: UpgradeActorsV5,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV5,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV5,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeChocolateHeight,
Network: network.Version14,
Migration: UpgradeActorsV6,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV6,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV6,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeOhSnapHeight,
Network: network.Version15,
Migration: UpgradeActorsV7,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV7,
StartWithin: 180,
DontStartWithin: 60,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeSkyrHeight,
Network: network.Version16,
Migration: UpgradeActorsV8,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV8,
StartWithin: 180,
DontStartWithin: 60,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeSharkHeight,
Network: network.Version17,
Migration: UpgradeActorsV9,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV9,
StartWithin: 240,
DontStartWithin: 60,
StopWithin: 20,
}, {
PreMigration: PreUpgradeActorsV9,
StartWithin: 15,
DontStartWithin: 10,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeHyggeHeight,
Network: network.Version18,
Migration: UpgradeActorsV10,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV10,
StartWithin: 60,
DontStartWithin: 10,
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeLightningHeight,
Network: network.Version19,
Migration: UpgradeActorsV11,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV11,
StartWithin: 120,
DontStartWithin: 15,
StopWithin: 10,
}},
Expensive: true,
}, {
Height: build.UpgradeThunderHeight,
Network: network.Version20,
Migration: nil,
}, {
Height: build.UpgradeWatermelonHeight,
Network: network.Version21,
Migration: UpgradeActorsV12,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV12,
StartWithin: 180,
DontStartWithin: 15,
StopWithin: 10,
}},
Expensive: true,
}, {
Height: build.UpgradeWatermelonFixHeight,
Network: network.Version21,
Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID1, calibnetv12BuggyManifestCID2),
}, {
Height: build.UpgradeWatermelonFix2Height,
Network: network.Version21,
Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID2, calibnetv12CorrectManifestCID1),
}, {
Height: build.UpgradeDragonHeight,
Network: network.Version22,
Migration: UpgradeActorsV13,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV13,
StartWithin: 120,
DontStartWithin: 15,
StopWithin: 10,
}},
Expensive: true,
}, {
Height: build.UpgradeCalibrationDragonFixHeight,
Network: network.Version22,
Migration: upgradeActorsV13VerifregFix(calibnetv13BuggyVerifregCID1, calibnetv13CorrectManifestCID1),
}, {
Height: build.UpgradeAussieHeight,
Network: network.Version23,
Migration: UpgradeActorsV14,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV14,
StartWithin: 120,
DontStartWithin: 15,
StopWithin: 10,
}},
Expensive: true,
},
}
for _, u := range updates {
if u.Height < 0 {
// upgrade disabled
continue
}
us = append(us, u)
}
return us
}
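// UpgradeFaucetBurnRecovery (network version 1) sweeps excess balances from
// non-system account, multisig and payment-channel actors, plus miners'
// available balances, into the reserve account. It then pays miners an
// allocation proportional to their power, gives a base amount to miners that
// had sectors at the lookback epoch, returns burnt funds to the reserve and
// tops up the reimbursement service, finally asserting that the total token
// supply still equals build.FilBase.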
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, em stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Some initial parameters
FundsForMiners := types.FromFil(1_000_000)
LookbackEpoch := abi.ChainEpoch(32000)
AccountCap := types.FromFil(0)
BaseMinerBalance := types.FromFil(20)
DesiredReimbursementBalance := types.FromFil(5_000_000)
isSystemAccount := func(addr address.Address) (bool, error) {
id, err := address.IDFromAddress(addr)
if err != nil {
return false, xerrors.Errorf("id address: %w", err)
}
if id < 1000 {
return true, nil
}
return false, nil
}
minerFundsAlloc := func(pow, tpow abi.StoragePower) abi.TokenAmount {
return types.BigDiv(types.BigMul(pow, FundsForMiners), tpow)
}
// Grab lookback state for account checks
lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get tipset at lookback height: %w", err)
}
lbtree, err := sm.ParentState(lbts)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree failed: %w", err)
}
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
type transfer struct {
From address.Address
To address.Address
Amt abi.TokenAmount
}
var transfers []transfer
subcalls := make([]types.ExecutionTrace, 0)
transferCb := func(trace types.ExecutionTrace) {
subcalls = append(subcalls, trace)
}
// Take all excess funds away, put them into the reserve account
err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
switch act.Code {
case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
sysAcc, err := isSystemAccount(addr)
if err != nil {
return xerrors.Errorf("checking system account: %w", err)
}
if !sysAcc {
transfers = append(transfers, transfer{
From: addr,
To: builtin.ReserveAddress,
Amt: act.Balance,
})
}
case builtin0.StorageMinerActorCodeID:
var st miner0.State
if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
var available abi.TokenAmount
{
defer func() {
if err := recover(); err != nil {
log.Warnf("Get available balance failed (%s, %s, %s): %s", addr, act.Head, act.Balance, err)
}
available = abi.NewTokenAmount(0)
}()
// this panics if the miner doesn't have enough funds to cover their locked pledge
available = st.GetAvailableBalance(act.Balance)
}
if !available.IsZero() {
transfers = append(transfers, transfer{
From: addr,
To: builtin.ReserveAddress,
Amt: available,
})
}
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
}
// Execute transfers from previous step
for _, t := range transfers {
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
}
}
// pull up power table to give miners back some funds proportional to their power
var ps power0.State
powAct, err := tree.GetActor(builtin0.StoragePowerActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load power actor: %w", err)
}
cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
if err := cst.Get(ctx, powAct.Head, &ps); err != nil {
return cid.Undef, xerrors.Errorf("failed to get power actor state: %w", err)
}
totalPower := ps.TotalBytesCommitted
var transfersBack []transfer
// Now, we return some funds to places where they are needed
err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
lbact, err := lbtree.GetActor(addr)
if err != nil {
if !xerrors.Is(err, types.ErrActorNotFound) {
return xerrors.Errorf("failed to get actor in lookback state")
}
}
prevBalance := abi.NewTokenAmount(0)
if lbact != nil {
prevBalance = lbact.Balance
}
switch act.Code {
case builtin0.AccountActorCodeID, builtin0.MultisigActorCodeID, builtin0.PaymentChannelActorCodeID:
nbalance := big.Min(prevBalance, AccountCap)
if nbalance.Sign() != 0 {
transfersBack = append(transfersBack, transfer{
From: builtin.ReserveAddress,
To: addr,
Amt: nbalance,
})
}
case builtin0.StorageMinerActorCodeID:
var st miner0.State
if err := sm.ChainStore().ActorStore(ctx).Get(ctx, act.Head, &st); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
var minfo miner0.MinerInfo
if err := cst.Get(ctx, st.Info, &minfo); err != nil {
return xerrors.Errorf("failed to get miner info: %w", err)
}
sectorsArr, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), st.Sectors)
if err != nil {
return xerrors.Errorf("failed to load sectors array: %w", err)
}
slen := sectorsArr.Length()
power := types.BigMul(types.NewInt(slen), types.NewInt(uint64(minfo.SectorSize)))
mfunds := minerFundsAlloc(power, totalPower)
transfersBack = append(transfersBack, transfer{
From: builtin.ReserveAddress,
To: minfo.Worker,
Amt: mfunds,
})
// Now make sure to give each miner who had power at the lookback some FIL
lbact, err := lbtree.GetActor(addr)
if err == nil {
var lbst miner0.State
if err := sm.ChainStore().ActorStore(ctx).Get(ctx, lbact.Head, &lbst); err != nil {
return xerrors.Errorf("failed to load miner state: %w", err)
}
lbsectors, err := adt0.AsArray(sm.ChainStore().ActorStore(ctx), lbst.Sectors)
if err != nil {
return xerrors.Errorf("failed to load lb sectors array: %w", err)
}
if lbsectors.Length() > 0 {
transfersBack = append(transfersBack, transfer{
From: builtin.ReserveAddress,
To: minfo.Worker,
Amt: BaseMinerBalance,
})
}
} else {
log.Warnf("failed to get miner in lookback state: %s", err)
}
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("foreach over state tree failed: %w", err)
}
for _, t := range transfersBack {
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
}
}
// transfer all burnt funds back to the reserve account
burntAct, err := tree.GetActor(builtin0.BurntFundsActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
}
if err := stmgr.DoTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
}
// Top up the reimbursement service
reimbAddr, err := address.NewFromString("t0111")
if err != nil {
return cid.Undef, xerrors.Errorf("failed to parse reimbursement service address")
}
reimb, err := tree.GetActor(reimbAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load reimbursement account actor: %w", err)
}
difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
if err := stmgr.DoTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
}
// Now, a final sanity check to make sure the balances all check out
total := abi.NewTokenAmount(0)
err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
total = types.BigAdd(total, act.Balance)
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("checking final state balance failed: %w", err)
}
exp := types.FromFil(build.FilBase)
if !exp.Equals(total) {
return cid.Undef, xerrors.Errorf("resultant state tree account balance was not correct: %s", total)
}
if em != nil {
// record the transfer in execution traces
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: types.MessageTrace{
To: fakeMsg.To,
From: fakeMsg.From,
},
Subcalls: subcalls,
},
Duration: 0,
GasCosts: nil,
}, false); err != nil {
return cid.Undef, xerrors.Errorf("recording transfers: %w", err)
}
}
return tree.Flush(ctx)
}
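// UpgradeIgnition (network version 3) runs the specs-actors nv3 migration,
// sets the network name to "ignition", resets the genesis multisigs' start
// epochs to the Liftoff height, splits two genesis multisigs (t0115 and t0116)
// into 50 portions each, and sanity-checks the resulting state tree against
// the total Filecoin supply.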
func UpgradeIgnition(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.ChainStore().ActorStore(ctx)
if build.UpgradeLiftoffHeight <= epoch {
return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
}
nst, err := nv3.MigrateStateTree(ctx, store, root, epoch)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors state: %w", err)
}
tree, err := sm.StateTree(nst)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
err = stmgr.SetNetworkName(ctx, store, tree, "ignition")
if err != nil {
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
}
split1, err := address.NewFromString("t0115")
if err != nil {
return cid.Undef, xerrors.Errorf("first split address: %w", err)
}
split2, err := address.NewFromString("t0116")
if err != nil {
return cid.Undef, xerrors.Errorf("second split address: %w", err)
}
err = resetGenesisMsigs0(ctx, sm, store, tree, build.UpgradeLiftoffHeight)
if err != nil {
return cid.Undef, xerrors.Errorf("resetting genesis msig start epochs: %w", err)
}
err = splitGenesisMultisig0(ctx, cb, split1, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting first msig: %w", err)
}
err = splitGenesisMultisig0(ctx, cb, split2, store, tree, 50, epoch, ts)
if err != nil {
return cid.Undef, xerrors.Errorf("splitting second msig: %w", err)
}
err = nv3.CheckStateTree(ctx, store, nst, epoch, builtin0.TotalFilecoin)
if err != nil {
return cid.Undef, xerrors.Errorf("sanity check after ignition upgrade failed: %w", err)
}
return tree.Flush(ctx)
}
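// splitGenesisMultisig0 splits a genesis multisig's initial balance evenly
// across `portions` newly created multisig actors that reuse the original
// signers, threshold and vesting parameters. If an ExecMonitor is supplied,
// the resulting transfers are recorded under a synthetic message.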
func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
if portions < 1 {
return xerrors.Errorf("cannot split into 0 portions")
}
mact, err := tree.GetActor(addr)
if err != nil {
return xerrors.Errorf("getting msig actor: %w", err)
}
mst, err := multisig.Load(store, mact)
if err != nil {
return xerrors.Errorf("getting msig state: %w", err)
}
signers, err := mst.Signers()
if err != nil {
return xerrors.Errorf("getting msig signers: %w", err)
}
thresh, err := mst.Threshold()
if err != nil {
return xerrors.Errorf("getting msig threshold: %w", err)
}
ibal, err := mst.InitialBalance()
if err != nil {
return xerrors.Errorf("getting msig initial balance: %w", err)
}
se, err := mst.StartEpoch()
if err != nil {
return xerrors.Errorf("getting msig start epoch: %w", err)
}
ud, err := mst.UnlockDuration()
if err != nil {
return xerrors.Errorf("getting msig unlock duration: %w", err)
}
pending, err := adt0.MakeEmptyMap(store).Root()
if err != nil {
return xerrors.Errorf("failed to create empty map: %w", err)
}
newIbal := big.Div(ibal, types.NewInt(portions))
newState := &multisig0.State{
Signers: signers,
NumApprovalsThreshold: thresh,
NextTxnID: 0,
InitialBalance: newIbal,
StartEpoch: se,
UnlockDuration: ud,
PendingTxns: pending,
}
scid, err := store.Put(ctx, newState)
if err != nil {
return xerrors.Errorf("storing new state: %w", err)
}
newActor := types.Actor{
Code: builtin0.MultisigActorCodeID,
Head: scid,
Nonce: 0,
Balance: big.Zero(),
}
i := uint64(0)
subcalls := make([]types.ExecutionTrace, 0, portions)
transferCb := func(trace types.ExecutionTrace) {
subcalls = append(subcalls, trace)
}
for i < portions {
keyAddr, err := stmgr.MakeKeyAddr(addr, i)
if err != nil {
return xerrors.Errorf("creating key address: %w", err)
}
idAddr, err := tree.RegisterNewAddress(keyAddr)
if err != nil {
return xerrors.Errorf("registering new address: %w", err)
}
err = tree.SetActor(idAddr, &newActor)
if err != nil {
return xerrors.Errorf("setting new msig actor state: %w", err)
}
if err := stmgr.DoTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
return xerrors.Errorf("transferring split msig balance: %w", err)
}
i++
}
if em != nil {
// record the transfer in execution traces
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: types.MessageTrace{
From: fakeMsg.From,
To: fakeMsg.To,
},
Subcalls: subcalls,
},
Duration: 0,
GasCosts: nil,
}, false); err != nil {
return xerrors.Errorf("recording transfers: %w", err)
}
}
return nil
}
// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
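// resetGenesisMsigs0 walks the genesis state tree and, for every multisig
// actor present at genesis, rewrites its StartEpoch in the current tree to
// startEpoch.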
func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
gb, err := sm.ChainStore().GetGenesis(ctx)
if err != nil {
return xerrors.Errorf("getting genesis block: %w", err)
}
gts, err := types.NewTipSet([]*types.BlockHeader{gb})
if err != nil {
return xerrors.Errorf("getting genesis tipset: %w", err)
}
cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
}
err = genesisTree.ForEach(func(addr address.Address, genesisActor *types.Actor) error {
if genesisActor.Code == builtin0.MultisigActorCodeID {
currActor, err := tree.GetActor(addr)
if err != nil {
return xerrors.Errorf("loading actor: %w", err)
}
var currState multisig0.State
if err := store.Get(ctx, currActor.Head, &currState); err != nil {
return xerrors.Errorf("reading multisig state: %w", err)
}
currState.StartEpoch = startEpoch
currActor.Head, err = store.Put(ctx, &currState)
if err != nil {
return xerrors.Errorf("writing new multisig state: %w", err)
}
if err := tree.SetActor(addr, currActor); err != nil {
return xerrors.Errorf("setting multisig actor: %w", err)
}
}
return nil
})
if err != nil {
return xerrors.Errorf("iterating over genesis actors: %w", err)
}
return nil
}
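// resetMultisigVesting0 overwrites a multisig actor's vesting parameters
// (StartEpoch, UnlockDuration and InitialBalance) with the supplied values.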
func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.StateTree, addr address.Address, startEpoch abi.ChainEpoch, duration abi.ChainEpoch, balance abi.TokenAmount) error {
act, err := tree.GetActor(addr)
if err != nil {
return xerrors.Errorf("getting actor: %w", err)
}
if !builtin.IsMultisigActor(act.Code) {
return xerrors.Errorf("actor wasn't msig: %w", err)
}
var msigState multisig0.State
if err := store.Get(ctx, act.Head, &msigState); err != nil {
return xerrors.Errorf("reading multisig state: %w", err)
}
msigState.StartEpoch = startEpoch
msigState.UnlockDuration = duration
msigState.InitialBalance = balance
act.Head, err = store.Put(ctx, &msigState)
if err != nil {
return xerrors.Errorf("writing new multisig state: %w", err)
}
if err := tree.SetActor(addr, act); err != nil {
return xerrors.Errorf("setting multisig actor: %w", err)
}
return nil
}
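// UpgradeRefuel resets the vesting schedules of the SAFT, reserve and root
// verifier multisigs: zero start epoch, zero unlock duration and zero initial
// balance.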
func UpgradeRefuel(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.ChainStore().ActorStore(ctx)
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
err = resetMultisigVesting0(ctx, store, tree, builtin.SaftAddress, 0, 0, big.Zero())
if err != nil {
return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
}
err = resetMultisigVesting0(ctx, store, tree, builtin.ReserveAddress, 0, 0, big.Zero())
if err != nil {
return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
}
err = resetMultisigVesting0(ctx, store, tree, builtin.RootVerifierAddress, 0, 0, big.Zero())
if err != nil {
return cid.Undef, xerrors.Errorf("tweaking msig vesting: %w", err)
}
return tree.Flush(ctx)
}
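// UpgradeActorsV2 (network version 4) migrates the state tree to specs-actors
// v2 via the nv4 migration, wraps the new actors HAMT in a versioned StateRoot,
// runs basic sanity checks, and copies the migrated blocks out of the temporary
// buffer blockstore into the chain's state blockstore.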
func UpgradeActorsV2(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
info, err := store.Put(ctx, new(types.StateInfo0))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create new state info for actors v2: %w", err)
}
newHamtRoot, err := nv4.MigrateStateTree(ctx, store, root, epoch, nv4.DefaultConfig())
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err)
}
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion1,
Actors: newHamtRoot,
Info: info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// perform some basic sanity checks to make sure everything still works.
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
} else if newRoot2 != newRoot {
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
}
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
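// UpgradeLiftoff performs no state migration; it only rewrites the network name
// recorded in state to "mainnet" via stmgr.SetNetworkName and flushes the tree.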
func UpgradeLiftoff(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
err = stmgr.SetNetworkName(ctx, sm.ChainStore().ActorStore(ctx), tree, "mainnet")
if err != nil {
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
}
return tree.Flush(ctx)
}
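// UpgradeCalico runs the nv7 migration over the actors HAMT. It is a no-op on
// non-mainnet builds, requires a StateTreeVersion1 root, keeps the state tree
// version unchanged, and repeats the load/flush/init-actor sanity checks before
// returning the new root.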
func UpgradeCalico(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
if build.BuildType != build.BuildMainnet {
return root, nil
}
store := sm.ChainStore().ActorStore(ctx)
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion1 {
return cid.Undef, xerrors.Errorf(
"expected state root version 1 for calico upgrade, got %d",
stateRoot.Version,
)
}
newHamtRoot, err := nv7.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, nv7.DefaultConfig())
if err != nil {
return cid.Undef, xerrors.Errorf("running nv7 migration: %w", err)
}
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: stateRoot.Version,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// perform some basic sanity checks to make sure everything still works.
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
} else if newRoot2 != newRoot {
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
} else if _, err := newSm.GetActor(builtin0.InitActorAddr); err != nil {
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
}
return newRoot, nil
}
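// UpgradeActorsV3 runs the nv10 migration in parallel, using all CPUs but three
// (for example, if MigrationMaxWorkerCount resolves to 8, the migration runs
// with 5 workers; at 3 or fewer it falls back to a single worker). On mainnet
// builds it additionally terminates the zero-address BLS account actor and
// flushes the tree again.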
func UpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv10.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
}
tree, err := sm.StateTree(newRoot)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
if build.BuildType == build.BuildMainnet {
err := stmgr.TerminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
}
newRoot, err = tree.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("flushing state tree: %w", err)
}
}
return newRoot, nil
}
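// PreUpgradeActorsV3 runs the same nv10 migration ahead of the upgrade epoch,
// with roughly half the CPUs (8 CPUs -> 4 workers, 4 or fewer -> 1), solely to
// warm the shared MigrationCache; the resulting root is discarded.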
func PreUpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv10.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
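// upgradeActorsV3Common is the shared body of the v3 upgrade and pre-migration:
// it stages writes in an in-memory blockstore layered over the chain blockstore,
// requires a StateTreeVersion1 input root, runs nv10.MigrateStateTree, wraps the
// new HAMT in a StateTreeVersion2 root, and finally copies the referenced blocks
// into the real blockstore with vm.Copy. The v4, v5, v6 and v8 helpers below
// follow the same pattern with newer migration packages (and, where applicable,
// newer tree versions).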
func upgradeActorsV3Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv10.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion1 {
return cid.Undef, xerrors.Errorf(
"expected state root version 1 for actors v3 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion2,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv12.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v4 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv12.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV4Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
func upgradeActorsV4Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv12.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion2 {
return cid.Undef, xerrors.Errorf(
"expected state root version 2 for actors v4 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion3,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv13.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv13.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
func upgradeActorsV5Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv13.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion3 {
return cid.Undef, xerrors.Errorf(
"expected state root version 3 for actors v5 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV6(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv14.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV6Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV6(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv14.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV6Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
func upgradeActorsV6Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv14.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v6 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv15.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v7 state: %w", err)
}
return newRoot, nil
}
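// From v7 onward the pre-migrations resolve a lookback tipset for the upgrade
// round and migrate from that tipset's state root, logging progress every five
// minutes, rather than reusing the root passed in.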
func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := nv15.Config{MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5}
_, err = upgradeActorsV7Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
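// upgradeActorsV7Common departs from the earlier helpers: instead of buffering
// into an in-memory tiered blockstore and copying at the end, it writes through
// a blockstore.NewAutobatch wrapper so migrated blocks are batched straight into
// the chain blockstore, and the trailing Flush and Shutdown calls drain and stop
// that batching worker in place of a final vm.Copy.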
func upgradeActorsV7Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv15.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
// TODO: pretty sure we'd achieve nothing by doing this, confirm in review
//buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore)
store := store.ActorStore(ctx, writeStore)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v7 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
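// UpgradeActorsV8 is the first upgrade in this file whose helper loads a
// builtin-actors bundle: upgradeActorsV8Common loads the v8 bundle into the
// blockstore and hands its manifest CID to the nv16 migration. On success the
// FVM liftoff banner is printed.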
func UpgradeActorsV8(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv16.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV8Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v8 state: %w", err)
}
fmt.Print(fvmLiftoffBanner)
return newRoot, nil
}
func PreUpgradeActorsV8(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := nv16.Config{MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5}
_, err = upgradeActorsV8Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func upgradeActorsV8Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv16.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, buf, actorstypes.Version8); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v8 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version8)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v8 upgrade")
}
// Perform the migration
newHamtRoot, err := nv16.MigrateStateTree(ctx, store, manifest, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v8: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv17.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV9Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v9 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid,
epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := nv17.Config{MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5}
_, err = upgradeActorsV9Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func upgradeActorsV9Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv17.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
store := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, sm.ChainStore().StateBlockstore(), actorstypes.Version9); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v9 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version9)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v9 upgrade")
}
// Perform the migration
newHamtRoot, err := nv17.MigrateStateTree(ctx, store, manifest, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v9: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
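// PreUpgradeActorsV10 warms the migration cache for the nv18 / actors v10
// upgrade. From v10 onward the upgrade and pre-upgrade helpers share the generic
// migration.Config type instead of the per-network-version configs (nv10.Config,
// nv12.Config, ...) used above.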
func PreUpgradeActorsV10(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
}
_, err = upgradeActorsV10Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func UpgradeActorsV10(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV10Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v10 state: %w", err)
}
return newRoot, nil
}
func upgradeActorsV10Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, sm.ChainStore().StateBlockstore(), actorstypes.Version10); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v10 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version10)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v10 upgrade")
}
// Perform the migration
newHamtRoot, err := nv18.MigrateStateTree(ctx, store, manifest, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v10: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func PreUpgradeActorsV11(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
}
_, err = upgradeActorsV11Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func UpgradeActorsV11(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV11Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
}
return newRoot, nil
}
func upgradeActorsV11Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
adtStore := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version11); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion5 {
return cid.Undef, xerrors.Errorf(
"expected state root version 5 for actors v11 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version11)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v11 upgrade")
}
// Perform the migration
newHamtRoot, err := nv19.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v11: %w", err)
}
// Persist the result.
newRoot, err := adtStore.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
}
_, err = upgradeActorsV12Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v12 state: %w", err)
}
return newRoot, nil
}
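// The CIDs and bundle suffixes below identify the buggy builtin-actors bundles
// that calibrationnet originally shipped for the v12 and v13 upgrades (a broken
// miner actor in v12, a broken verified-registry actor in v13) together with the
// corrected manifests that replaced them. They drive the calibnet-only branch in
// upgradeActorsV12Common and the fix-up upgrades defined later in this file.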
var (
calibnetv12BuggyMinerCID1 = cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq")
calibnetv12BuggyMinerCID2 = cid.MustParse("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie")
calibnetv12BuggyBundleSuffix1 = "calibrationnet-12-rc1"
calibnetv12BuggyBundleSuffix2 = "calibrationnet-12-rc2"
calibnetv12BuggyManifestCID1 = cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs")
calibnetv12BuggyManifestCID2 = cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru")
calibnetv12CorrectManifestCID1 = cid.MustParse("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca")
calibnetv13BuggyVerifregCID1 = cid.MustParse("bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm")
calibnetv13BuggyBundleSuffix1 = "calibrationnet-13-rc3"
calibnetv13BuggyManifestCID1 = cid.MustParse("bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi")
calibnetv13CorrectManifestCID1 = cid.MustParse("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs")
)
func upgradeActorsV12Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
adtStore := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version12); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion5 {
return cid.Undef, xerrors.Errorf(
"expected state root version 5 for actors v12 upgrade, got %d",
stateRoot.Version,
)
}
// check whether or not this is a calibnet upgrade
// we do this because calibnet upgraded to a "wrong" actors bundle, which was then corrected
// we thus upgrade to calibrationnet-buggy in this upgrade
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
initActor, err := actorsIn.GetActor(builtin.InitActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get init actor: %w", err)
}
var initState init11.State
if err := adtStore.Get(ctx, initActor.Head, &initState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get init actor state: %w", err)
}
var manifestCid cid.Cid
if initState.NetworkName == "calibrationnet" {
embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version12, calibnetv12BuggyBundleSuffix1)
if !ok {
return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle")
}
var err error
manifestCid, err = bundle.LoadBundle(ctx, writeStore, bytes.NewReader(embedded))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err)
}
if manifestCid != calibnetv12BuggyManifestCID1 {
return cid.Undef, xerrors.Errorf("didn't find expected buggy calibnet bundle manifest: %s != %s", manifestCid, calibnetv12BuggyManifestCID1)
}
} else {
ok := false
manifestCid, ok = actors.GetManifest(actorstypes.Version12)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade")
}
}
// Perform the migration
newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifestCid, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err)
}
// Persist the result.
newRoot, err := adtStore.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
// ////////////////////
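// buildUpgradeActorsV12MinerFix returns an upgrade function that repairs a
// calibrationnet state tree built with a buggy v12 miner actor. It loads the
// corrected v12 bundle and the second buggy bundle, derives an old-code to
// new-code CID mapping from the two manifests, rewrites every actor's code CID
// into a fresh state tree (heads, nonces and balances are preserved; only the
// system actor's head changes, to point at the new manifest data), and sanity
// checks the result before flushing the new root.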
func buildUpgradeActorsV12MinerFix(oldBuggyMinerCID, newManifestCID cid.Cid) func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
return func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
stateStore := sm.ChainStore().StateBlockstore()
adtStore := store.ActorStore(ctx, stateStore)
// ensure that the manifest is loaded in the blockstore
// this loads the "correct" bundle for UpgradeWatermelonFix2Height
if err := bundle.LoadBundles(ctx, stateStore, actorstypes.Version12); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// this loads the second buggy bundle, for UpgradeWatermelonFixHeight
embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version12, calibnetv12BuggyBundleSuffix2)
if !ok {
return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle")
}
_, err := bundle.LoadBundle(ctx, stateStore, bytes.NewReader(embedded))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err)
}
// now confirm we have the one we're migrating to
if haveManifest, err := stateStore.Has(ctx, newManifestCID); err != nil {
return cid.Undef, xerrors.Errorf("blockstore error when loading manifest %s: %w", newManifestCID, err)
} else if !haveManifest {
return cid.Undef, xerrors.Errorf("missing new manifest %s in blockstore", newManifestCID)
}
// Load input state tree
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
// load old manifest data
systemActor, err := actorsIn.GetActor(builtin.SystemActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
}
var systemState system11.State
if err := adtStore.Get(ctx, systemActor.Head, &systemState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
}
var oldManifestData manifest.ManifestData
if err := adtStore.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil {
return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err)
}
// load new manifest
var newManifest manifest.Manifest
if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil {
return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err)
}
if err := newManifest.Load(ctx, adtStore); err != nil {
return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err)
}
// build the CID mapping
codeMapping := make(map[cid.Cid]cid.Cid, len(oldManifestData.Entries))
for _, oldEntry := range oldManifestData.Entries {
newCID, ok := newManifest.Get(oldEntry.Name)
if !ok {
return cid.Undef, xerrors.Errorf("missing manifest entry for %s", oldEntry.Name)
}
// Note: we expect newCID to be the same as oldEntry.Code for all actors except the miner actor
codeMapping[oldEntry.Code] = newCID
}
// Create empty actorsOut
actorsOut, err := state.NewStateTree(adtStore, actorsIn.Version())
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create new tree: %w", err)
}
// Perform the migration
err = actorsIn.ForEach(func(a address.Address, actor *types.Actor) error {
newCid, ok := codeMapping[actor.Code]
if !ok {
return xerrors.Errorf("didn't find mapping for %s", actor.Code)
}
return actorsOut.SetActor(a, &types.ActorV5{
Code: newCid,
Head: actor.Head,
Nonce: actor.Nonce,
Balance: actor.Balance,
Address: actor.Address,
})
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to perform migration: %w", err)
}
systemState.BuiltinActors = newManifest.Data
newSystemHead, err := adtStore.Put(ctx, &systemState)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to put new system state: %w", err)
}
systemActor.Head = newSystemHead
if err = actorsOut.SetActor(builtin.SystemActorAddr, systemActor); err != nil {
return cid.Undef, xerrors.Errorf("failed to put new system actor: %w", err)
}
// Sanity checking
err = actorsIn.ForEach(func(a address.Address, inActor *types.Actor) error {
outActor, err := actorsOut.GetActor(a)
if err != nil {
return xerrors.Errorf("failed to get actor in outTree: %w", err)
}
if inActor.Nonce != outActor.Nonce {
return xerrors.Errorf("mismatched nonce for actor %s", a)
}
if !inActor.Balance.Equals(outActor.Balance) {
return xerrors.Errorf("mismatched balance for actor %s: %d != %d", a, inActor.Balance, outActor.Balance)
}
if inActor.Address != outActor.Address && inActor.Address.String() != outActor.Address.String() {
return xerrors.Errorf("mismatched address for actor %s: %s != %s", a, inActor.Address, outActor.Address)
}
if inActor.Head != outActor.Head && a != builtin.SystemActorAddr {
return xerrors.Errorf("mismatched head for actor %s", a)
}
// Actor Codes are only expected to change for the miner actor
if inActor.Code != oldBuggyMinerCID && inActor.Code != outActor.Code {
return xerrors.Errorf("unexpected change in code for actor %s", a)
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to sanity check migration: %w", err)
}
// Persist the result.
newRoot, err := actorsOut.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
return newRoot, nil
}
}
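// PreUpgradeActorsV13 warms the migration cache for the v13 upgrade. Unlike the
// earlier upgrades in this file, the v13 migration config also pins the target
// UpgradeEpoch (build.UpgradeDragonHeight) alongside the usual worker and queue
// settings.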
chore: Merge nv22 into master (#11699) * [WIP] feat: Add nv22 skeleton Addition of Network Version 22 skeleton * update FFI * feat: drand: refactor round verification * feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226) * Initial work supporting DDO pieces in lotus-miner * sealing: Update pipeline input to operate on UniversalPiece * sealing: Update pipeline checks/sealing states to operate on UniversalPiece * sealing: Make pipeline build with UniversalPiece * move PieceDealInfo out of api * make gen * make sealing pipeline unit tests pass * fix itest ensemble build * don't panic in SectorsStatus with deals * stop linter from complaining about checkPieces * fix sector import tests * mod tidy * sealing: Add logic for (pre)committing DDO sectors * sealing: state-types with method defs * DDO non-snap pipeline works(?), DDO Itests * DDO support in snapdeals pipeline * make gen * update actor bundles * update the gst market fix * fix: chain: use PreCommitSectorsBatch2 when setting up genesis * some bug fixes * integration working changes * update actor bundles * Make TestOnboardRawPieceSnap pass * Appease the linter * Make deadlines test pass with v12 actors * Update go-state-types, abstract market DealState * make gen * mod tidy, lint fixes * Fix some more tests * Bump version in master Bump version in master * Make gen Make gen * fix sender * fix: lotus-provider: Fix winning PoSt * fix: sql Scan cannot write to an object * Actually show miner-addrs in info-log Actually show miner-addrs in lotus-provider info-log * [WIP] feat: Add nv22 skeleton Addition of Network Version 22 skeleton * update FFI * ddo is now nv22 * make gen * temp actor bundle with ddo * use working go-state-types * gst with v13 market migration * update bundle, builtin.MethodsMiner.ProveCommitSectors2 -> 3 * actually working v13 migration, v13 migration itest * Address review * sealing: Correct DDO snap pledge math * itests: Mixed ddo itest * pipeline: Fix sectorWeight * sealing: convert market deals into PAMs in mixed sectors * sealing: make market to ddo conversion work * fix lint * update gst * Update actors and GST to lastest integ branch * commit batcher: Update ProveCommitSectors3Params builder logic * make gen * use builtin-actors master * ddo: address review * itests: Add commd assertions to ddo tests * make gen * gst with fixed types * config knobs for RequireActivationSuccess * storage: Drop obsolete flaky tasts --------- Co-authored-by: Jennifer Wang <jiayingw703@gmail.com> Co-authored-by: Aayush <arajasek94@gmail.com> Co-authored-by: Shrenuj Bansal <shrenuj.bansal@protocol.ai> Co-authored-by: Phi <orjan.roren@gmail.com> Co-authored-by: Andrew Jackson (Ajax) <snadrus@gmail.com> Co-authored-by: TippyFlits <james.bluett@protocol.ai> * feat: implement FIP-0063 * chore: deps: update to go-multiaddr v0.12.2 (#11602) * feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612) This: 1. Adds nv22 support. 2. Updates the message tracing format. Co-authored-by: Steven Allen <steven@stebalien.com> * AggregateProofType nil when doing batch updates Use latest nv22 go-state-types version with matching update * Update to v13.0.0-rc.2 bundle * chore: Upgrade heights and codename Update upgrade heights Co-Authored-By: Steven Allen <steven@stebalien.com> * Update epoch after nv22 DRAND switch Update epoch after nv22 DRAND switch * Update Mango codename to Phoneix Make the codename for the Drand-change inline with Dragon style. 
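// PreUpgradeActorsV13 runs the v13 actors migration ahead of the Dragon upgrade
// against a lookback tipset, warming the migration cache so that the real
// migration at the upgrade epoch has less work left to do.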
func PreUpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but fall back to a single worker when 4 or fewer are available.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
UpgradeEpoch: build.UpgradeDragonHeight,
}
_, err = upgradeActorsV13Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
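// UpgradeActorsV13 runs the full v13 actors state migration at the upgrade epoch
// and returns the new state root.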
func UpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3, with a minimum of 1 worker.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
UpgradeEpoch: build.UpgradeDragonHeight,
}
newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v13 state: %w", err)
}
return newRoot, nil
}
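// upgradeActorsV13Common is the shared implementation behind the v13
// pre-migration and migration. It loads the v13 actor bundle, checks the state
// tree version, selects the (intentionally buggy) bundle when migrating
// calibrationnet, runs the nv22 state tree migration, and persists the new root
// through an auto-batching blockstore.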
func upgradeActorsV13Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
adtStore := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version13); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion5 {
return cid.Undef, xerrors.Errorf(
"expected state root version 5 for actors v13 upgrade, got %d",
stateRoot.Version,
)
}
// Check whether this is a calibrationnet upgrade: calibrationnet originally
// upgraded to a buggy v13 actors bundle that was only corrected afterwards, so
// this migration deliberately targets the buggy calibrationnet bundle (the
// corrected bundle is applied later, at UpgradeCalibrationDragonFixHeight).
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
initActor, err := actorsIn.GetActor(builtin.InitActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
}
var initState init12.State
if err := adtStore.Get(ctx, initActor.Head, &initState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
}
var manifestCid cid.Cid
if initState.NetworkName == "calibrationnet" {
embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version13, calibnetv13BuggyBundleSuffix1)
if !ok {
return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle")
}
var err error
manifestCid, err = bundle.LoadBundle(ctx, writeStore, bytes.NewReader(embedded))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err)
}
if manifestCid != calibnetv13BuggyManifestCID1 {
return cid.Undef, xerrors.Errorf("didn't find expected buggy calibnet bundle manifest: %s != %s", manifestCid, calibnetv12BuggyManifestCID1)
}
} else {
ok := false
manifestCid, ok = actors.GetManifest(actorstypes.Version13)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v13 upgrade")
}
}
// Perform the migration
newHamtRoot, err := nv22.MigrateStateTree(ctx, adtStore, manifestCid, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v13: %w", err)
}
// Persist the result.
newRoot, err := adtStore.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
// ////////////////////
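// upgradeActorsV13VerifregFix returns a migration that remaps actor code CIDs
// from the buggy calibrationnet v13 bundle to the corrected one, given the code
// CID of the buggy verified registry actor and the CID of the corrected
// manifest. Actor heads, nonces and balances are carried over unchanged, the
// system actor is pointed at the new manifest data, and a sanity pass verifies
// that only the expected code CIDs changed. It is intended to run at
// UpgradeCalibrationDragonFixHeight on calibrationnet.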
func upgradeActorsV13VerifregFix(oldBuggyVerifregCID, newManifestCID cid.Cid) func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
return func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
stateStore := sm.ChainStore().StateBlockstore()
adtStore := store.ActorStore(ctx, stateStore)
// ensure that the manifest is loaded in the blockstore
// this loads the "correct" bundle for UpgradeCalibrationDragonFixHeight
if err := bundle.LoadBundles(ctx, stateStore, actorstypes.Version13); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// this loads the buggy bundle, for UpgradeDragonHeight
embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version13, calibnetv13BuggyBundleSuffix1)
if !ok {
return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle")
}
_, err := bundle.LoadBundle(ctx, stateStore, bytes.NewReader(embedded))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err)
}
// now confirm we have the one we're migrating to
if haveManifest, err := stateStore.Has(ctx, newManifestCID); err != nil {
return cid.Undef, xerrors.Errorf("blockstore error when loading manifest %s: %w", newManifestCID, err)
} else if !haveManifest {
return cid.Undef, xerrors.Errorf("missing new manifest %s in blockstore", newManifestCID)
}
// Load input state tree
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
// load old manifest data
systemActor, err := actorsIn.GetActor(builtin.SystemActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
}
var systemState system12.State
if err := adtStore.Get(ctx, systemActor.Head, &systemState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
}
var oldManifestData manifest.ManifestData
if err := adtStore.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil {
return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err)
}
// load new manifest
var newManifest manifest.Manifest
if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil {
return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err)
}
if err := newManifest.Load(ctx, adtStore); err != nil {
return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err)
}
// build the CID mapping
codeMapping := make(map[cid.Cid]cid.Cid, len(oldManifestData.Entries))
for _, oldEntry := range oldManifestData.Entries {
newCID, ok := newManifest.Get(oldEntry.Name)
if !ok {
return cid.Undef, xerrors.Errorf("missing manifest entry for %s", oldEntry.Name)
}
// Note: we expect newCID to be the same as oldEntry.Code for all actors except the verifreg actor
codeMapping[oldEntry.Code] = newCID
}
// Create empty actorsOut
actorsOut, err := state.NewStateTree(adtStore, actorsIn.Version())
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create new tree: %w", err)
}
// Perform the migration
err = actorsIn.ForEach(func(a address.Address, actor *types.Actor) error {
newCid, ok := codeMapping[actor.Code]
if !ok {
return xerrors.Errorf("didn't find mapping for %s", actor.Code)
}
return actorsOut.SetActor(a, &types.ActorV5{
Code: newCid,
Head: actor.Head,
Nonce: actor.Nonce,
Balance: actor.Balance,
Address: actor.Address,
})
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to perform migration: %w", err)
}
systemState.BuiltinActors = newManifest.Data
newSystemHead, err := adtStore.Put(ctx, &systemState)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to put new system state: %w", err)
}
systemActor.Head = newSystemHead
if err = actorsOut.SetActor(builtin.SystemActorAddr, systemActor); err != nil {
return cid.Undef, xerrors.Errorf("failed to put new system actor: %w", err)
}
// Sanity checking
err = actorsIn.ForEach(func(a address.Address, inActor *types.Actor) error {
outActor, err := actorsOut.GetActor(a)
if err != nil {
return xerrors.Errorf("failed to get actor in outTree: %w", err)
}
if inActor.Nonce != outActor.Nonce {
return xerrors.Errorf("mismatched nonce for actor %s", a)
}
if !inActor.Balance.Equals(outActor.Balance) {
return xerrors.Errorf("mismatched balance for actor %s: %d != %d", a, inActor.Balance, outActor.Balance)
}
if inActor.Address != outActor.Address && inActor.Address.String() != outActor.Address.String() {
return xerrors.Errorf("mismatched address for actor %s: %s != %s", a, inActor.Address, outActor.Address)
}
if inActor.Head != outActor.Head && a != builtin.SystemActorAddr {
return xerrors.Errorf("mismatched head for actor %s", a)
}
// Actor Codes are only expected to change for the verifreg actor
if inActor.Code != oldBuggyVerifregCID && inActor.Code != outActor.Code {
return xerrors.Errorf("unexpected change in code for actor %s", a)
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to sanity check migration: %w", err)
}
// Persist the result.
newRoot, err := actorsOut.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
return newRoot, nil
}
}
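// PreUpgradeActorsV14 runs the v14 actors migration against a lookback tipset to
// populate the migration cache ahead of the real upgrade.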
func PreUpgradeActorsV14(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but fall back to a single worker when 4 or fewer are available.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
}
_, err = upgradeActorsV14Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
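// UpgradeActorsV14 runs the full v14 actors state migration at the upgrade epoch
// and returns the new state root.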
func UpgradeActorsV14(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3, with a minimum of 1 worker.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV14Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v14 state: %w", err)
}
return newRoot, nil
}
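// Illustrative wiring only: an upgrade-schedule entry for nv23 would reference the
// two functions above roughly as sketched below. The height constant, network
// version, and epoch windows here are placeholders/assumptions, not values taken
// from this file's schedule.
//
//	{
//		Height:    build.UpgradeWaffleHeight, // assumed name of the nv23 height constant
//		Network:   network.Version23,
//		Migration: UpgradeActorsV14,
//		PreMigrations: []stmgr.PreMigration{{
//			PreMigration:    PreUpgradeActorsV14,
//			StartWithin:     120, // placeholder epoch windows
//			DontStartWithin: 15,
//			StopWithin:      10,
//		}},
//		Expensive: true,
//	},

// upgradeActorsV14Common is the shared body of the nv23 pre-migration and
// at-height migration: it loads the v14 actor bundle, checks the state tree
// version, runs the go-state-types nv23 migration, and persists the resulting
// state root through an auto-batching blockstore.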
func upgradeActorsV14Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
adtStore := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version14); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion5 {
return cid.Undef, xerrors.Errorf(
"expected state root version 5 for actors v14 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version14)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v14 upgrade")
}
// Perform the migration
newHamtRoot, err := nv23.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v14: %w", err)
}
// Persist the result.
newRoot, err := adtStore.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree and shut down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
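// Illustrative only: a minimal spot check on the root returned above, using the
// same helpers already used in this file (state.LoadStateTree, GetActor). The
// variable names are placeholders, not code in this file.
//
//	tree, err := state.LoadStateTree(store.ActorStore(ctx, sm.ChainStore().StateBlockstore()), newRoot)
//	if err != nil {
//		return xerrors.Errorf("failed to load migrated state tree: %w", err)
//	}
//	sysActor, err := tree.GetActor(builtin.SystemActorAddr)
//	if err != nil {
//		return xerrors.Errorf("failed to load system actor from migrated tree: %w", err)
//	}
//	_ = sysActor // e.g. check sysActor.Code against the v14 system actor code CID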
////////////////////
// Example upgrade function if the upgrade requires only code changes
//func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
// buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
//
// oldAv := actorstypes.Version8
// av := actorstypes.Version9
// // This may change for the upgrade
// newStateTreeVersion := types.StateTreeVersion4
//
// // ensure that the manifest is loaded in the blockstore
// if err := bundle.LoadBundles(ctx, buf, av); err != nil {
// return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
// }
//
// newActorsManifestCid, ok := actors.GetManifest(av)
// if !ok {
// return cid.Undef, xerrors.Errorf("no manifest CID for v9 upgrade")
// }
//
// bstore := sm.ChainStore().StateBlockstore()
// return LiteMigration(ctx, bstore, newActorsManifestCid, root, oldAv, av, types.StateTreeVersion4, newStateTreeVersion)
//}

func LiteMigration(ctx context.Context, bstore blockstore.Blockstore, newActorsManifestCid cid.Cid, root cid.Cid, oldAv actorstypes.Version, newAv actorstypes.Version, oldStateTreeVersion types.StateTreeVersion, newStateTreeVersion types.StateTreeVersion) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(bstore, blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
adtStore := gstStore.WrapStore(ctx, store)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != oldStateTreeVersion {
return cid.Undef, xerrors.Errorf(
"expected state tree version %d for actors code upgrade, got %d",
oldStateTreeVersion,
stateRoot.Version,
)
}
st, err := state.LoadStateTree(store, root)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load state tree: %w", err)
}
oldManifestData, err := stmgr.GetManifestData(ctx, st)
if err != nil {
return cid.Undef, xerrors.Errorf("error loading old actor manifest: %w", err)
}
// load new manifest
newManifest, err := actors.LoadManifest(ctx, newActorsManifestCid, store)
if err != nil {
return cid.Undef, xerrors.Errorf("error loading new manifest: %w", err)
}
var newManifestData manifest.ManifestData
if err := store.Get(ctx, newManifest.Data, &newManifestData); err != nil {
return cid.Undef, xerrors.Errorf("error loading new manifest data: %w", err)
}
if len(oldManifestData.Entries) != len(manifest.GetBuiltinActorsKeys(oldAv)) {
return cid.Undef, xerrors.Errorf("incomplete old manifest with %d code CIDs", len(oldManifestData.Entries))
}
if len(newManifestData.Entries) != len(manifest.GetBuiltinActorsKeys(newAv)) {
return cid.Undef, xerrors.Errorf("incomplete new manifest with %d code CIDs", len(newManifestData.Entries))
}
// Maps prior version code CIDs to the corresponding new version code CIDs.
migrations := make(map[cid.Cid]cid.Cid)
for _, entry := range oldManifestData.Entries {
newCodeCid, ok := newManifest.Get(entry.Name)
if !ok {
return cid.Undef, xerrors.Errorf("code cid for %s actor not found in new manifest", entry.Name)
}
migrations[entry.Code] = newCodeCid
}
startTime := time.Now()
// Load output state tree
actorsOut, err := state.NewStateTree(adtStore, newStateTreeVersion)
if err != nil {
return cid.Undef, err
}
// Insert migrated records in output state tree.
err = st.ForEach(func(addr address.Address, actorIn *types.Actor) error {
newCid, ok := migrations[actorIn.Code]
if !ok {
return xerrors.Errorf("new code cid not found in migrations for actor %s", addr)
}
var head cid.Cid
if addr == system.Address {
newSystemState, err := system.MakeState(store, newAv, newManifest.Data)
if err != nil {
return xerrors.Errorf("could not make system actor state: %w", err)
}
head, err = store.Put(ctx, newSystemState)
if err != nil {
return xerrors.Errorf("could not set system actor state head: %w", err)
}
} else {
head = actorIn.Head
}
newActor := types.Actor{
Code: newCid,
Head: head,
Nonce: actorIn.Nonce,
Balance: actorIn.Balance,
}
err = actorsOut.SetActor(addr, &newActor)
if err != nil {
return xerrors.Errorf("could not set actor at address %s: %w", addr, err)
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed update actor states: %w", err)
}
elapsed := time.Since(startTime)
log.Infof("All done after %v. Flushing state tree root.", elapsed)
newRoot, err := actorsOut.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to flush new actors: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
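// migrationLogger forwards log messages emitted by the go-state-types migrations
// (it is passed as the logger to MigrateStateTree above) to the lotus logger at
// the corresponding level.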
type migrationLogger struct{}
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
switch level {
case rt.DEBUG:
log.Debugf(msg, args...)
case rt.INFO:
log.Infof(msg, args...)
case rt.WARN:
log.Warnf(msg, args...)
case rt.ERROR:
log.Errorf(msg, args...)
}
}
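// A compile-time check one could add (sketch only; assumes the Logger interface
// in the go-state-types migration package is the one MigrateStateTree expects):
//
//	var _ migration.Logger = migrationLogger{}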