Merge pull request #12147 from filecoin-project/chore/nv23-v1.28.0-backport
chore: NV23 release: backport changes from master
Commit: f458606e35

.github/workflows/test.yml (vendored, 2 lines changed)
@@ -61,6 +61,7 @@ jobs:
# to support resource intensive jobs.
runners: |
{
"itest-niporep_manual": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-worker": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-manual_onboarding": ["self-hosted", "linux", "x64", "4xlarge"],
@@ -118,6 +119,7 @@ jobs:
"itest-direct_data_onboard_verified",
"itest-direct_data_onboard",
"itest-manual_onboarding",
"itest-niporep_manual",
"itest-net",
"itest-path_detach_redeclare",
"itest-sealing_resources",
@@ -2,7 +2,12 @@

# UNRELEASED

## ☢️ Upgrade Warnings ☢️

- This Lotus release includes some correctness improvements to the events subsystem, impacting RPC APIs including `GetActorEventsRaw`, `SubscribeActorEventsRaw`, `eth_getLogs` and the `eth` filter APIs. Part of these improvements involve an events database migration that may take some time to complete on nodes with extensive event databases. See [filecoin-project/lotus#12080](https://github.com/filecoin-project/lotus/pull/12080) for details.

## New features

- feat: Add trace transaction API supporting RPC method `trace_transaction` ([filecoin-project/lotus#12068](https://github.com/filecoin-project/lotus/pull/12068))

## Improvements
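The `trace_transaction` feature above is a read-only RPC method, so the simplest way to exercise it is a raw JSON-RPC call against a node built from this release. The sketch below is illustrative only, not part of the diff: it assumes a local node exposing the Eth-compatible API on the usual `http://127.0.0.1:1234/rpc/v1` endpoint and uses a placeholder transaction hash; the exact trace format is defined by filecoin-project/lotus#12068.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumed local endpoint; adjust host/port and token handling for your node.
	url := "http://127.0.0.1:1234/rpc/v1"
	// Placeholder hash; substitute a real Ethereum-style transaction hash.
	req := []byte(`{"jsonrpc":"2.0","id":1,"method":"trace_transaction","params":["0x<txhash>"]}`)

	resp, err := http.Post(url, "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON array of trace objects for the transaction
}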
Binary file not shown.
@ -143,25 +143,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacecmkqezl3a5klkzz7z4ou4jwqk4zzd3nvz727l4qh44ngsxtxdblu"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"),
|
||||
"cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"),
|
||||
"datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"),
|
||||
"eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"),
|
||||
"ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"),
|
||||
"evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"),
|
||||
"init": MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"),
|
||||
"multisig": MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"),
|
||||
"account": MustParseCid("bafk2bzaceazutruyfvvqxgp5qoneq36uv6yethps2bonil5psy2vivl5j2hks"),
|
||||
"cron": MustParseCid("bafk2bzacecsiz2nzjieposnkz2kqvjjnqyu5zwk6ccm4dbptx26v3qirm6zni"),
|
||||
"datacap": MustParseCid("bafk2bzaceaavii766hmiawhw2fjvtoy5kvbukou3zejf6gtwu7xi4jxt4uidk"),
|
||||
"eam": MustParseCid("bafk2bzacebkzhnamn5ohtsvn76opprsi3ao3ujgytjr3c6kdcvhmhg4ze5xxm"),
|
||||
"ethaccount": MustParseCid("bafk2bzacebvvri25rmgt6yy5qtdrikcsestk6z52aebynwd53s2rm2l3ukn7g"),
|
||||
"evm": MustParseCid("bafk2bzacebta2jkyxknvwnr6ldcimmwpzenhtdwqbuifzk6g2wktzqf3vj33a"),
|
||||
"init": MustParseCid("bafk2bzaceblybzwnn55uiivbsjae6l7haz5iocexnynfcz2yjg5spciimxdme"),
|
||||
"multisig": MustParseCid("bafk2bzaceb54rbdcfdcdtzwbohshn64opgsqf5vhqh3xqb37iignsm3plrtpa"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebyyn42ie7jekdytacqpqfll7xctsfpza3tb2sonzsjdeltxqgmdo"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"),
|
||||
"storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"),
|
||||
"storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"),
|
||||
"storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"),
|
||||
"system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"),
|
||||
"reward": MustParseCid("bafk2bzaceczaoglexx6w3m744s4emfmjkeizpl4ofdkh4xzhevjtd6zift5iu"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceceaqmhkxuerleq2dpju35mcsdiklpkisglzlj5xkf32hbyqn7sam"),
|
||||
"storageminer": MustParseCid("bafk2bzacebkiqu5pclx5zze4ugcsn3lvumihyathpcrjfq36b3hgmd7jqe2bk"),
|
||||
"storagepower": MustParseCid("bafk2bzacebcxydq2iampltz5zoo3oojka45hjkd62vz46xtpl6qilhkkjdeaq"),
|
||||
"system": MustParseCid("bafk2bzacebbrs3dzgxwj43ztup7twz25xkbhhtmcbjjbscjvpsrpbwux3b32g"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebj3znhdpxqjgvztrv3petqwdkvrefg4j6lrp3n7wfrkdoan4os42"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
@ -295,25 +295,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacebq3hncszqpojglh2dkwekybq4zn6qpc4gceqbx36wndps5qehtau"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"),
|
||||
"cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"),
|
||||
"datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"),
|
||||
"eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"),
|
||||
"ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"),
|
||||
"evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"),
|
||||
"init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"),
|
||||
"multisig": MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"),
|
||||
"account": MustParseCid("bafk2bzaced5ecfm56dvtw26q56j4d32yoccyd7ggxn3qdki2enxpqqav45ths"),
|
||||
"cron": MustParseCid("bafk2bzacedpbtttpyvtjncqoyobr63mhqqtlrygbnudhxyp2vha56f626dkfs"),
|
||||
"datacap": MustParseCid("bafk2bzacecded3lcvo7ndsk66samyecw2trnhrgzi7jxsary3sqgopxlk6rku"),
|
||||
"eam": MustParseCid("bafk2bzacecsda4uw7dcu76a27gnrrdcm73tgms7wrte6jbou63vloktkqc5ne"),
|
||||
"ethaccount": MustParseCid("bafk2bzacebu2lcxfmohomjj3umslnylwugf5gssywdq3575tjarta7o227dls"),
|
||||
"evm": MustParseCid("bafk2bzacea4xnekruhfmdnzvzeo6cbf7jsfgco6x5wje2ckwc2ui2ojzcrlgu"),
|
||||
"init": MustParseCid("bafk2bzacedfmsdlewihdcrkdepnfata26nj7akbvexzs3chicujhjf2uxsazc"),
|
||||
"multisig": MustParseCid("bafk2bzacedwx4svscsp6wqqu2vlcunjihvvm4u2jnsqjkwutjhir7dwtl7z6m"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedbit7oo6lryhbo64uikvtjtfcth6oxwy3eebxerenu2h7rj44n24"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"),
|
||||
"storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"),
|
||||
"storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"),
|
||||
"system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebj2zdquagzy2xxn7up574oemg3w7ed3fe4aujkyhgdwj57voesn2"),
|
||||
"reward": MustParseCid("bafk2bzaced5rlycj7fzpscfc7p3wwxarngwqylqshj7te3uffey5tevunz4we"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceatwbyrec2nnwggxc2alpqve7rl52fmbhqflebuxmmnvg3qckjb7c"),
|
||||
"storageminer": MustParseCid("bafk2bzacecr7ozkdz7l2pq3ig5qxae2ysivbnojhsn4gw3o57ov4mhksma7me"),
|
||||
"storagepower": MustParseCid("bafk2bzacedgeolvjtnw7fkji5kqmx322abv6uls2v34fuml6nw36dvfcw4mtu"),
|
||||
"system": MustParseCid("bafk2bzacederl6tlpieldsn6mkndqwd4wj5orfoqgab6p2klswfn3cjagxwla"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceczw2kp6gjjdcjbso7mewp7guik7gr525pal6dotdja2lrct6ok3c"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
@ -456,25 +456,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacecr6g2co4zkdvrsbz2z7wh44o6hrl3rpbgnlhe52772hhjs43vxge"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"),
|
||||
"cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"),
|
||||
"datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"),
|
||||
"eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"),
|
||||
"ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"),
|
||||
"evm": MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"),
|
||||
"init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"),
|
||||
"multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"),
|
||||
"account": MustParseCid("bafk2bzacecaoykugysdikvzptp5jc6qzhfxv3tsy7eggoabfk34jc6ozr3nju"),
|
||||
"cron": MustParseCid("bafk2bzacede6xljkkb6qww47hdho2dfs75bfxjwmxspldwclekd2ntdvcapj6"),
|
||||
"datacap": MustParseCid("bafk2bzacedjwq3frxklkkejk4payjpjwgdj352izzcmymtzib6xzgrrtczeuw"),
|
||||
"eam": MustParseCid("bafk2bzaceana2fkgcjhtr56rrqngvilybjpavfgfwf3kuqed6qxa4lfdjmkac"),
|
||||
"ethaccount": MustParseCid("bafk2bzacedg7tcqwoykjd57dkettxxato4yufxlyzy6hgsggc7jhf6v7zqgas"),
|
||||
"evm": MustParseCid("bafk2bzacedwqu6pi4csjod4c5677pgsr6g4ekhqnqgdnsiodyqczmcsj3sspk"),
|
||||
"init": MustParseCid("bafk2bzaceakukvex4wjcrmwp5msp4d2dhe4qlftqxgolxx5sbtshah3fxoepy"),
|
||||
"multisig": MustParseCid("bafk2bzaceamp2c2qfgnl7z6vz5majhcm64ml72kbg5kea4xgdnfcyqanynfks"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebs44wqfedfx3o23q7tkza5cm3q54gevreqzetsei2bvhsmqdwz3i"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"),
|
||||
"storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"),
|
||||
"storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"),
|
||||
"system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"),
|
||||
"reward": MustParseCid("bafk2bzacedyckhwkkmcoc2n6byzw6vxi3lqyp3zjs3b7flfqj2wybprwxkw2m"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebx7ky74ry222rvi56wav3dbal4rqooeuafiakg7r6ksxh2uahbju"),
|
||||
"storageminer": MustParseCid("bafk2bzacecwfsmi6xxbwkddunjt2zs6qn2g4hvrwury7g36vqvafmok7gs642"),
|
||||
"storagepower": MustParseCid("bafk2bzacecqcvllpsttt3nqcso4ng2z4cjjn5sr36is7yze74ihqnol2dst2u"),
|
||||
"system": MustParseCid("bafk2bzacea75k6agtdmjxdfnwbctzstc6pozdblpdevc5ixjevvuzcqwwuzrc"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedxwgmgffeapq4nffplapb5mixkrm2vosmc2i3fxcye5dookiqubu"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
@ -608,25 +608,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacebwn7ymtozv5yz3x5hnxl4bds2grlgsk5kncyxjak3hqyhslb534m"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"),
|
||||
"cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"),
|
||||
"datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"),
|
||||
"eam": MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"),
|
||||
"evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"),
|
||||
"init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"),
|
||||
"multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"),
|
||||
"account": MustParseCid("bafk2bzacecqyi3xhyrze7hlo73zzyzz5jw5e6eqqyesmnbef4xr7br5amqsm2"),
|
||||
"cron": MustParseCid("bafk2bzacebmeovn3fehscsm2ejcd2vejgqmuqidzx3ytlvp4osa6fes3w73yy"),
|
||||
"datacap": MustParseCid("bafk2bzaceasqdluegec5qllzjhu66jsyvb74dix6wjztpiaxvha74in7h4eek"),
|
||||
"eam": MustParseCid("bafk2bzaced4shnjesuxk44ufllcywjbaixerx6hkcyj5rqqopjuic725fymx2"),
|
||||
"ethaccount": MustParseCid("bafk2bzacebbw6hg766y4ouycqlr3llur7sxkgj7hnu7jq4xlwtycp3ovpqjee"),
|
||||
"evm": MustParseCid("bafk2bzaceafzevk77d6zhjbrw7grm5p3es2pzuklpvcthedjv6ejh7alvxqoc"),
|
||||
"init": MustParseCid("bafk2bzacebaywpmwlfhhog6vey3dkz25hjrlwnhacsjryq3ujymyttolglats"),
|
||||
"multisig": MustParseCid("bafk2bzacedm52r4h7upic7ynukzwjkadefbjeq7w7ozdonsbdumgoabk7xass"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecsvfy77loubouoyqwl2ke574zpg3x5f2qon6ougjzfjna6eadwxg"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"),
|
||||
"storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"),
|
||||
"storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"),
|
||||
"system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"),
|
||||
"reward": MustParseCid("bafk2bzacecvc7v5d2krwxqeyklfg2xb5qc3kalu66265smcbmwepjmj643uqu"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecegzksfqu35xlcc6ymxae3vqpkntbajx4jtahugszvb77fnr37ii"),
|
||||
"storageminer": MustParseCid("bafk2bzacedoimzimltyfzbabwuam2bqw32nqwo2twjq73q7mklwtvqi2evsw2"),
|
||||
"storagepower": MustParseCid("bafk2bzacebszcfmepyvssrg2tbbqgpqm2cnrl5ub4n6cfy7eie2wwseyloxvs"),
|
||||
"system": MustParseCid("bafk2bzacea4rh5i36ucj23zb4mid4tw5ym2wqlfap4ejjaynvobengeuby4ja"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceapbsihfuk3munfpcoevtxlwuenxeiiv7dp7v3t2yjs26hcpypexi"),
|
||||
},
|
||||
}, {
|
||||
Network: "hyperspace",
|
||||
@ -783,25 +783,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacecbueuzsropvqawsri27owo7isa5gp2qtluhrfsto2qg7wpgxnkba"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"),
|
||||
"cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"),
|
||||
"datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"),
|
||||
"eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"),
|
||||
"evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"),
|
||||
"init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"),
|
||||
"multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"),
|
||||
"account": MustParseCid("bafk2bzacebr7ik7lng7vysm754mu5x7sakphwm4soqi6zwbox4ukpd6ndwvqy"),
|
||||
"cron": MustParseCid("bafk2bzacecwn6eiwa7ysimmk6i57i5whj4cqzwijx3xdlxwb5canmweaez6xc"),
|
||||
"datacap": MustParseCid("bafk2bzacecidw7ajvtjhmygqs2yxhmuybyvtwp25dxpblvdxxo7u4gqfzirjg"),
|
||||
"eam": MustParseCid("bafk2bzaced2cxnfwngpcubg63h7zk4y5hjwwuhfjxrh43xozax2u6u2woweju"),
|
||||
"ethaccount": MustParseCid("bafk2bzacechu4u7asol5mpcsr6fo6jeaeltvayj5bllupyiux7tcynsxby7ko"),
|
||||
"evm": MustParseCid("bafk2bzacedupohbgwrcw5ztbbsvrpqyybnokr4ylegmk7hrbt3ueeykua6zxw"),
|
||||
"init": MustParseCid("bafk2bzacecbbcshenkb6z2v4irsudv7tyklfgphhizhghix6ke5gpl4r5f2b6"),
|
||||
"multisig": MustParseCid("bafk2bzaceajcmsngu3f2chk2y7nanlen5xlftzatytzm6hxwiiw5i5nz36bfc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceavslp27u3f4zwjq45rlg6assj6cqod7r5f6wfwkptlpi6j4qkmne"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"),
|
||||
"storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"),
|
||||
"storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"),
|
||||
"system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"),
|
||||
"reward": MustParseCid("bafk2bzacedvfnjittwrkhoar6n5xrykowg2e6rpur4poh2m572f7m7evyx4lc"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceaju5wobednmornvdqcyi6khkvdttkru4dqduqicrdmohlwfddwhg"),
|
||||
"storageminer": MustParseCid("bafk2bzacea3f43rxzemmakjpktq2ukayngean3oo2de5cdxlg2wsyn53wmepc"),
|
||||
"storagepower": MustParseCid("bafk2bzacedo6scxizooytn53wjwg2ooiawnj4fsoylcadnp7mhgzluuckjl42"),
|
||||
"system": MustParseCid("bafk2bzacecak4ow7tmauku42s3u2yydonk4hx6ov6ov542hy7lcbji3nhrrhs"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebvyzjzmvmjvpypphqsumpy6rzxuugnehgum7grc6sv3yqxzrshb4"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
@ -935,25 +935,25 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacebgc3zha33w7wggshqjeeomjnxfsqdezpt6e5w4tcqnfp42hvhqyy"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
|
||||
"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
|
||||
"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
|
||||
"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
|
||||
"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
|
||||
"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
|
||||
"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
|
||||
"multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
|
||||
"account": MustParseCid("bafk2bzacedz5ko33xyknznoj5mcwnijhggapvn7hzmec3ix3vlqjg6uuwownm"),
|
||||
"cron": MustParseCid("bafk2bzacebifywvakusxpvnzn33qneqm46g3trnqvuvaw5cgslfg5cl7prtwa"),
|
||||
"datacap": MustParseCid("bafk2bzacedx4zxq5ae6qbucm6elokqhhmlwbtas7mbqm6rnigwkmorrtfzo66"),
|
||||
"eam": MustParseCid("bafk2bzaceavilsrvq7kdyqelapjabixsgmhf6caufc4hdyviccmerf3sawzdk"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceavutdrzkspoiz6a5m46m2qzqbsmdb33s62xnkpevuwjrhxm6trn2"),
|
||||
"evm": MustParseCid("bafk2bzacedovnz466nuqdp5pzs6sztcqofsy5zet7k2gybnbelpwuxpmfphwo"),
|
||||
"init": MustParseCid("bafk2bzaceajnftptl2tdbrudshriqcg3caw5mqublmd6i5dm3qug3j5qo7g4q"),
|
||||
"multisig": MustParseCid("bafk2bzacebagezdbubvu4g3norwxaomzpk3mxyzfal3rsvvppdbt46y6h74yc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebmtufrtdxo757c6c5ndmsk3wlnzzuzqp4yzbtltqjutnlgh7awus"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
|
||||
"storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"),
|
||||
"storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"),
|
||||
"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
|
||||
"reward": MustParseCid("bafk2bzacebtnflocubjjrtvzba4tmrn32tlx2klfgmjreqeo234lqdzu4ovna"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec6tovj5vh6lyor3m67uhahhldrixnqg7e5246pce5gcwkqwmbvxo"),
|
||||
"storageminer": MustParseCid("bafk2bzacedcwqwswnkbb22eoajke4t72qvxrp3rpqaotymbqsnlqkz5ovde6m"),
|
||||
"storagepower": MustParseCid("bafk2bzacea4c3h6gnhgfcjf6lxhzcmp5qq7kgrcym7wv4vcnmd4whegpjdwuq"),
|
||||
"system": MustParseCid("bafk2bzacechwe3ehay6h7x7tecaajgsxrvvmzcdul6omejfvzf4mpbzkuryhm"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec66mmy7sj6qy5urqplwhbem7wbjrzna7mxh6qbdljdawhu3g5rao"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
@ -1087,24 +1087,24 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 14,
|
||||
BundleGitTag: "v13.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"),
|
||||
BundleGitTag: "v14.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzacebca2c7r6m47fv5ojbzfgqxw74zxyzphlkxigj75q7vobbe6vm4x4"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
|
||||
"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
|
||||
"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
|
||||
"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
|
||||
"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
|
||||
"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
|
||||
"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
|
||||
"multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
|
||||
"account": MustParseCid("bafk2bzacedz5ko33xyknznoj5mcwnijhggapvn7hzmec3ix3vlqjg6uuwownm"),
|
||||
"cron": MustParseCid("bafk2bzacebifywvakusxpvnzn33qneqm46g3trnqvuvaw5cgslfg5cl7prtwa"),
|
||||
"datacap": MustParseCid("bafk2bzacedx4zxq5ae6qbucm6elokqhhmlwbtas7mbqm6rnigwkmorrtfzo66"),
|
||||
"eam": MustParseCid("bafk2bzaceavilsrvq7kdyqelapjabixsgmhf6caufc4hdyviccmerf3sawzdk"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceavutdrzkspoiz6a5m46m2qzqbsmdb33s62xnkpevuwjrhxm6trn2"),
|
||||
"evm": MustParseCid("bafk2bzacedovnz466nuqdp5pzs6sztcqofsy5zet7k2gybnbelpwuxpmfphwo"),
|
||||
"init": MustParseCid("bafk2bzaceajnftptl2tdbrudshriqcg3caw5mqublmd6i5dm3qug3j5qo7g4q"),
|
||||
"multisig": MustParseCid("bafk2bzacecaioywepaxgtrpkmr7k2bjlwcpj6mu3r7ctk77a3sqwrb72wn4ra"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebmtufrtdxo757c6c5ndmsk3wlnzzuzqp4yzbtltqjutnlgh7awus"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
|
||||
"storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"),
|
||||
"storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"),
|
||||
"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
|
||||
"reward": MustParseCid("bafk2bzacebtnflocubjjrtvzba4tmrn32tlx2klfgmjreqeo234lqdzu4ovna"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec6tovj5vh6lyor3m67uhahhldrixnqg7e5246pce5gcwkqwmbvxo"),
|
||||
"storageminer": MustParseCid("bafk2bzacecgckzurejgtbw2xfmys5talfq5sloia2mrjcdsl5xolbwkolkrss"),
|
||||
"storagepower": MustParseCid("bafk2bzacea4c3h6gnhgfcjf6lxhzcmp5qq7kgrcym7wv4vcnmd4whegpjdwuq"),
|
||||
"system": MustParseCid("bafk2bzacechwe3ehay6h7x7tecaajgsxrvvmzcdul6omejfvzf4mpbzkuryhm"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec66mmy7sj6qy5urqplwhbem7wbjrzna7mxh6qbdljdawhu3g5rao"),
|
||||
},
|
||||
}}
|
||||
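The registry above is generated: each entry pins an actors bundle by `BundleGitTag` and `ManifestCid`, and maps actor names to their Wasm code CIDs for that network and actors version. A minimal sketch of looking a code CID up in this table, assuming it lives in the same package as the generated file (so `cid` and `EmbeddedBuiltinActorsMetadata` are in scope) and that the registry's `Version` field is integer-backed; `actorCodeCID` is a hypothetical helper, not part of the Lotus API:

// actorCodeCID scans the embedded metadata for the given network/version pair
// and returns the code CID registered under the given actor name.
func actorCodeCID(network string, version uint, name string) (cid.Cid, bool) {
	for _, meta := range EmbeddedBuiltinActorsMetadata {
		if meta.Network == network && uint(meta.Version) == version {
			c, ok := meta.Actors[name]
			return c, ok
		}
	}
	return cid.Undef, false
}

// Example: c, ok := actorCodeCID("mainnet", 14, "storageminer")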
@@ -8,7 +8,16 @@ import (
"github.com/filecoin-project/go-state-types/network"
)

type SealProofVariant int

const (
SealProofVariant_Standard SealProofVariant = iota
SealProofVariant_Synthetic
SealProofVariant_NonInteractive
)

var MinSyntheticPoRepVersion = network.Version21
var MinNonInteractivePoRepVersion = network.Version23

func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
var parts []bitfield.BitField
@@ -33,7 +42,18 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))

// SealProofTypeFromSectorSize returns preferred seal proof type for creating
// new miner actors and new sectors
func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version, synthetic bool) (abi.RegisteredSealProof, error) {
func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version, variant SealProofVariant) (abi.RegisteredSealProof, error) {
switch variant {
case SealProofVariant_Synthetic:
if nv < MinSyntheticPoRepVersion {
return 0, xerrors.Errorf("synthetic proofs are not supported on network version %d", nv)
}
case SealProofVariant_NonInteractive:
if nv < MinNonInteractivePoRepVersion {
return 0, xerrors.Errorf("non-interactive proofs are not supported on network version %d", nv)
}
}

switch {
case nv < network.Version7:
switch ssize {
@@ -67,11 +87,13 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version, synth
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}

if nv >= MinSyntheticPoRepVersion && synthetic {
switch variant {
case SealProofVariant_Synthetic:
return toSynthetic(v)
} else {
return v, nil
case SealProofVariant_NonInteractive:
return toNonInteractive(v)
}
return v, nil
}

return 0, xerrors.Errorf("unsupported network version")
@@ -94,6 +116,23 @@ func toSynthetic(in abi.RegisteredSealProof) (abi.RegisteredSealProof, error) {
}
}

func toNonInteractive(in abi.RegisteredSealProof) (abi.RegisteredSealProof, error) {
switch in {
case abi.RegisteredSealProof_StackedDrg2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_2_Feat_NiPoRep, nil
case abi.RegisteredSealProof_StackedDrg8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_2_Feat_NiPoRep, nil
case abi.RegisteredSealProof_StackedDrg512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_2_Feat_NiPoRep, nil
case abi.RegisteredSealProof_StackedDrg32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_2_Feat_NiPoRep, nil
case abi.RegisteredSealProof_StackedDrg64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_2_Feat_NiPoRep, nil
default:
return 0, xerrors.Errorf("unsupported conversion to non-interactive: %v", in)
}
}

// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating
// new miner actors and new sectors
func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredPoStProof, error) {
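The change above replaces the former boolean `synthetic` argument with an explicit `SealProofVariant`, so callers name the PoRep flavour they want and the network-version gating lives in one place. A minimal caller sketch (not part of the diff), assuming the `miner` package shown here and an arbitrary 32 GiB sector at network version 23:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	ssize := abi.SectorSize(32 << 30) // 32 GiB
	nv := network.Version23

	for _, variant := range []miner.SealProofVariant{
		miner.SealProofVariant_Standard,
		miner.SealProofVariant_Synthetic,      // rejected below MinSyntheticPoRepVersion (nv21)
		miner.SealProofVariant_NonInteractive, // rejected below MinNonInteractivePoRepVersion (nv23)
	} {
		spt, err := miner.SealProofTypeFromSectorSize(ssize, nv, variant)
		if err != nil {
			fmt.Println("variant", variant, "error:", err)
			continue
		}
		fmt.Println("variant", variant, "-> seal proof type", spt)
	}
}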
@@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
power11 "github.com/filecoin-project/go-state-types/builtin/v11/power"
miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
@@ -41,6 +42,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/builtin/system"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus"
lrand "github.com/filecoin-project/lotus/chain/rand"
@@ -136,7 +138,11 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
i := i
m := m

spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv, synthetic)
variant := miner.SealProofVariant_Standard
if synthetic {
variant = miner.SealProofVariant_Synthetic
}
spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv, variant)
if err != nil {
return cid.Undef, err
}
@@ -491,7 +497,12 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
// Commit one-by-one, otherwise pledge math tends to explode
var paramBytes []byte

if av >= actorstypes.Version6 {
if av >= actorstypes.Version14 {
confirmParams := &miner14.InternalSectorSetupForPresealParams{
Sectors: []abi.SectorNumber{preseal.SectorID},
}
paramBytes = mustEnc(confirmParams)
} else if av >= actorstypes.Version6 {
// TODO: fixup
confirmParams := &builtin6.ConfirmSectorProofsParams{
Sectors: []abi.SectorNumber{preseal.SectorID},
@@ -506,9 +517,17 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
paramBytes = mustEnc(confirmParams)
}

_, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, power.Address, big.Zero(), builtintypes.MethodsMiner.ConfirmSectorProofsValid, paramBytes)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
var csErr error
if nv >= network.Version23 {
_, csErr = doExecValue(ctx, genesisVm, minerInfos[i].maddr, system.Address, big.Zero(), builtintypes.MethodsMiner.InternalSectorSetupForPreseal,
paramBytes)
} else {
_, csErr = doExecValue(ctx, genesisVm, minerInfos[i].maddr, power.Address, big.Zero(), builtintypes.MethodsMiner.InternalSectorSetupForPreseal,
paramBytes)
}

if csErr != nil {
return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", csErr)
}

if av >= actorstypes.Version2 {
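The second hunk above changes two things at once: presealed sectors are confirmed via `InternalSectorSetupForPreseal` params on actors v14+, and from network version 23 the confirmation message is sent from the system actor instead of the power actor. Condensed, the sender is the only thing the version check varies; a sketch (not part of the diff) using the same helpers as the surrounding genesis code:

// Sketch only: equivalent to the nv-gated branches above.
sender := power.Address
if nv >= network.Version23 {
	sender = system.Address
}
_, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, sender, big.Zero(),
	builtintypes.MethodsMiner.InternalSectorSetupForPreseal, paramBytes)
if err != nil {
	return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err)
}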
cmd/lotus-bench/bench-sectors.sh (new executable file, 89 lines)
@@ -0,0 +1,89 @@
#!/usr/bin/env bash

# This is an example of how a full sector lifecycle can be benchmarked using `lotus-bench`. The
# script generates an unsealed sector, runs PC1, PC2, C1, and C2, and prints the duration of each
# step. The script also prints the proof length and total duration of the lifecycle.
#
# Change `flags` to `--non-interactive` to run NI-PoRep, and switch `sector_size` to the desired
# sector size. The script assumes that the `lotus-bench` binary is in the same directory as the
# script.
#
# Note that for larger sector sizes, /tmp may not have enough space for the full lifecycle.

set -e
set -o pipefail

tmpdir=/tmp

flags=""
# flags="--non-interactive"
sector_size=2KiB
# sector_size=8MiB
# sector_size=512MiB
# sector_size=32GiB
# sector_size=64GiB

unsealed_file=${tmpdir}/unsealed${sector_size}
sealed_file=${tmpdir}/sealed${sector_size}
cache_dir=${tmpdir}/cache${sector_size}
c1_file=${tmpdir}/c1_${sector_size}.json
proof_out=${tmpdir}/proof_${sector_size}.hex
rm -rf $unsealed_file $sealed_file $cache_dir $c1_file

echo "Generating unsealed sector ..."
read -r unsealed_cid unsealed_size <<< $(./lotus-bench simple addpiece --sector-size $sector_size /dev/zero $unsealed_file | tail -1)
if [ $? -ne 0 ]; then exit 1; fi
echo "Unsealed CID: $unsealed_cid"
echo "Unsealed Size: $unsealed_size"

start_total=$(date +%s%3N)

echo "Running PC1 ..."
echo "./lotus-bench simple precommit1 --sector-size $sector_size $flags $unsealed_file $sealed_file $cache_dir $unsealed_cid $unsealed_size"
start_pc1=$(date +%s%3N)
pc1_output=$(./lotus-bench simple precommit1 --sector-size $sector_size $flags $unsealed_file $sealed_file $cache_dir $unsealed_cid $unsealed_size | tail -1)
if [ $? -ne 0 ]; then exit 1; fi
end_pc1=$(date +%s%3N)
pc1_duration=$((end_pc1 - start_pc1))

echo "Running PC2 ..."
echo "./lotus-bench simple precommit2 --sector-size $sector_size $flags $sealed_file $cache_dir $pc1_output"
start_pc2=$(date +%s%3N)
read -r commd commr <<< $(./lotus-bench simple precommit2 --sector-size $sector_size $flags $sealed_file $cache_dir $pc1_output | tail -1 | sed -E 's/[dr]://g')
if [ $? -ne 0 ]; then exit 1; fi
end_pc2=$(date +%s%3N)
pc2_duration=$((end_pc2 - start_pc2))

echo "CommD CID: $commd"
echo "CommR CID: $commr"

echo "Running C1 ..."
echo "./lotus-bench simple commit1 --sector-size $sector_size $flags $sealed_file $cache_dir ${commd} ${commr} $c1_file"
start_c1=$(date +%s%3N)
./lotus-bench simple commit1 --sector-size $sector_size $flags $sealed_file $cache_dir ${commd} ${commr} $c1_file
end_c1=$(date +%s%3N)
c1_duration=$((end_c1 - start_c1))

echo "Running C2 ..."
echo "./lotus-bench simple commit2 $flags $c1_file"
start_c2=$(date +%s%3N)
proof=$(./lotus-bench simple commit2 $flags $c1_file | tail -1 | sed 's/^proof: //')
if [ $? -ne 0 ]; then exit 1; fi
end_c2=$(date +%s%3N)
c2_duration=$((end_c2 - start_c2))

echo $proof > $proof_out
echo "Wrote proof to $proof_out"

# $proof is hex, calculate the length of it in bytes
proof_len=$(echo "scale=0; ${#proof}/2" | bc)
echo "Proof length: $proof_len"

end_total=$(date +%s%3N)
total_duration=$((end_total - start_total))

echo "PC1 duration: $((pc1_duration / 1000)).$((pc1_duration % 1000)) seconds"
echo "PC2 duration: $((pc2_duration / 1000)).$((pc2_duration % 1000)) seconds"
echo "C1 duration: $((c1_duration / 1000)).$((c1_duration % 1000)) seconds"
echo "C2 duration: $((c2_duration / 1000)).$((c2_duration % 1000)) seconds"
echo "Total duration: $((total_duration / 1000)).$((total_duration % 1000)) seconds"
@@ -338,7 +338,7 @@ var sealBenchCmd = &cli.Command{

if !skipc2 {
log.Info("generating winning post candidates")
wipt, err := spt(sectorSize, false).RegisteredWinningPoStProof()
wipt, err := spt(sectorSize, miner.SealProofVariant_Standard).RegisteredWinningPoStProof()
if err != nil {
return err
}
@@ -556,7 +556,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
Miner: mid,
Number: i,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

start := time.Now()
@@ -586,7 +586,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
Miner: mid,
Number: i,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

start := time.Now()
@@ -797,7 +797,7 @@ var proveCmd = &cli.Command{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(c2in.SectorNum),
},
ProofType: spt(abi.SectorSize(c2in.SectorSize), false),
ProofType: spt(abi.SectorSize(c2in.SectorSize), miner.SealProofVariant_Standard),
}

fmt.Printf("----\nstart proof computation\n")
@@ -828,8 +828,8 @@ func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string {
return types.SizeStr(types.BigInt{Int: bps}) + "/s"
}

func spt(ssize abi.SectorSize, synth bool) abi.RegisteredSealProof {
spt, err := miner.SealProofTypeFromSectorSize(ssize, build.TestNetworkVersion, synth)
func spt(ssize abi.SectorSize, variant miner.SealProofVariant) abi.RegisteredSealProof {
spt, err := miner.SealProofTypeFromSectorSize(ssize, build.TestNetworkVersion, variant)
if err != nil {
panic(err)
}
@@ -21,6 +21,7 @@ import (
prf "github.com/filecoin-project/specs-actors/actors/runtime/proof"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
@@ -186,7 +187,7 @@ var simpleAddPiece = &cli.Command{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

data, err := os.Open(cctx.Args().First())
@@ -201,7 +202,7 @@ var simpleAddPiece = &cli.Command{
return xerrors.Errorf("add piece: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("AddPiece %s (%s)\n", took, bps(abi.SectorSize(pi.Size), 1, took))
fmt.Printf("%s %d\n", pi.PieceCID, pi.Size)
@@ -227,6 +228,10 @@ var simplePreCommit1 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.BoolFlag{
Name: "non-interactive",
Usage: "generate NI-PoRep proofs",
},
},
ArgsUsage: "[unsealed] [sealed] [cache] [[piece cid] [piece size]]...",
Action: func(cctx *cli.Context) error {
@@ -258,12 +263,17 @@ var simplePreCommit1 = &cli.Command{
return err
}

variant, err := variantFromArgs(cctx)
if err != nil {
return err
}

sr := storiface.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, cctx.Bool("synthetic")),
ProofType: spt(sectorSize, variant),
}

ticket := [32]byte{}
@@ -283,7 +293,7 @@ var simplePreCommit1 = &cli.Command{
return xerrors.Errorf("precommit1: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("PreCommit1 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Println(base64.StdEncoding.EncodeToString(p1o))
@@ -308,6 +318,10 @@ var simplePreCommit2 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.BoolFlag{
Name: "non-interactive",
Usage: "generate NI-PoRep proofs",
},
&cli.StringFlag{
Name: "external-pc2",
Usage: "command for computing PC2 externally",
@@ -383,12 +397,17 @@ Example invocation of lotus-bench as external executor:
return err
}

variant, err := variantFromArgs(cctx)
if err != nil {
return err
}

sr := storiface.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, cctx.Bool("synthetic")),
ProofType: spt(sectorSize, variant),
}

start := time.Now()
@@ -398,7 +417,7 @@ Example invocation of lotus-bench as external executor:
return xerrors.Errorf("precommit2: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("PreCommit2 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Printf("d:%s r:%s\n", p2o.Unsealed, p2o.Sealed)
@@ -423,6 +442,10 @@ var simpleCommit1 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.BoolFlag{
Name: "non-interactive",
Usage: "generate NI-PoRep proofs",
},
},
ArgsUsage: "[sealed] [cache] [comm D] [comm R] [c1out.json]",
Action: func(cctx *cli.Context) error {
@@ -453,12 +476,17 @@ var simpleCommit1 = &cli.Command{
return err
}

variant, err := variantFromArgs(cctx)
if err != nil {
return err
}

sr := storiface.SectorRef{
ID: abi.SectorID{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, cctx.Bool("synthetic")),
ProofType: spt(sectorSize, variant),
}

start := time.Now()
@@ -493,7 +521,7 @@ var simpleCommit1 = &cli.Command{
return xerrors.Errorf("commit1: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("Commit1 %s (%s)\n", took, bps(sectorSize, 1, took))

@@ -533,6 +561,10 @@ var simpleCommit2 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.BoolFlag{
Name: "non-interactive",
Usage: "generate NI-PoRep proofs",
},
},
Action: func(c *cli.Context) error {
if c.Bool("no-gpu") {
@@ -574,12 +606,17 @@ var simpleCommit2 = &cli.Command{
return err
}

variant, err := variantFromArgs(c)
if err != nil {
return err
}

ref := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(c2in.SectorNum),
},
ProofType: spt(abi.SectorSize(c2in.SectorSize), c.Bool("synthetic")),
ProofType: spt(abi.SectorSize(c2in.SectorSize), variant),
}

start := time.Now()
@@ -637,7 +674,7 @@ var simpleWindowPost = &cli.Command{
return xerrors.Errorf("parse commr: %w", err)
}

wpt, err := spt(sectorSize, false).RegisteredWindowPoStProof()
wpt, err := spt(sectorSize, miner.SealProofVariant_Standard).RegisteredWindowPoStProof()
if err != nil {
return err
}
@@ -657,7 +694,7 @@ var simpleWindowPost = &cli.Command{

vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
SectorInfo: prf.SectorInfo{
SealProof: spt(sectorSize, false),
SealProof: spt(sectorSize, miner.SealProofVariant_Standard),
SectorNumber: sn,
SealedCID: commr,
},
@@ -728,7 +765,7 @@ var simpleWinningPost = &cli.Command{
return xerrors.Errorf("parse commr: %w", err)
}

wpt, err := spt(sectorSize, false).RegisteredWinningPoStProof()
wpt, err := spt(sectorSize, miner.SealProofVariant_Standard).RegisteredWinningPoStProof()
if err != nil {
return err
}
@@ -748,7 +785,7 @@ var simpleWinningPost = &cli.Command{

vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{
SectorInfo: prf.SectorInfo{
SealProof: spt(sectorSize, false),
SealProof: spt(sectorSize, miner.SealProofVariant_Standard),
SectorNumber: sn,
SealedCID: commr,
},
@@ -842,7 +879,7 @@ var simpleReplicaUpdate = &cli.Command{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

start := time.Now()
@@ -852,7 +889,7 @@ var simpleReplicaUpdate = &cli.Command{
return xerrors.Errorf("replica update: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("ReplicaUpdate %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Printf("d:%s r:%s\n", ruo.NewUnsealed, ruo.NewSealed)
@@ -910,7 +947,7 @@ var simpleProveReplicaUpdate1 = &cli.Command{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

start := time.Now()
@@ -935,7 +972,7 @@ var simpleProveReplicaUpdate1 = &cli.Command{
return xerrors.Errorf("replica update: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("ProveReplicaUpdate1 %s (%s)\n", took, bps(sectorSize, 1, took))

@@ -997,7 +1034,7 @@ var simpleProveReplicaUpdate2 = &cli.Command{
Miner: mid,
Number: 1,
},
ProofType: spt(sectorSize, false),
ProofType: spt(sectorSize, miner.SealProofVariant_Standard),
}

start := time.Now()
@@ -1032,7 +1069,7 @@ var simpleProveReplicaUpdate2 = &cli.Command{
return xerrors.Errorf("prove replica update2: %w", err)
}

took := time.Now().Sub(start)
took := time.Since(start)

fmt.Printf("ProveReplicaUpdate2 %s (%s)\n", took, bps(sectorSize, 1, took))
fmt.Println("p:", base64.StdEncoding.EncodeToString(p))
@@ -1071,3 +1108,16 @@ func ParsePieceInfos(cctx *cli.Context, firstArg int) ([]abi.PieceInfo, error) {

return out, nil
}

func variantFromArgs(cctx *cli.Context) (miner.SealProofVariant, error) {
variant := miner.SealProofVariant_Standard
if cctx.Bool("synthetic") {
if cctx.Bool("non-interactive") {
return variant, xerrors.Errorf("can't use both synthetic and non-interactive")
}
variant = miner.SealProofVariant_Synthetic
} else if cctx.Bool("non-interactive") {
variant = miner.SealProofVariant_NonInteractive
}
return variant, nil
}
@@ -137,9 +137,8 @@ var preSealCmd = &cli.Command{
nv = network.Version(c.Uint64("network-version"))
}

var synthetic = false // there's little reason to have this for a seed.

spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv, synthetic)
var variant = miner.SealProofVariant_Standard // there's little reason to have this for a seed.
spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv, variant)
if err != nil {
return err
}
@@ -18,7 +18,7 @@ import (
"github.com/ipfs/go-cid"
)

var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{
var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{
{{- range . }} {
Network: {{printf "%q" .Network}},
Version: {{.Version}},
go.mod (2 lines changed)

@@ -46,7 +46,7 @@ require (
github.com/filecoin-project/go-jsonrpc v0.3.2
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.4
github.com/filecoin-project/go-state-types v0.14.0-rc1
github.com/filecoin-project/go-state-types v0.14.0-rc2
github.com/filecoin-project/go-statemachine v1.0.3
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
go.sum (3 lines changed)

@@ -291,8 +291,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
github.com/filecoin-project/go-state-types v0.14.0-rc1 h1:kWBGX/uqZmYotYMNmw+R/fIuot/k0KMcEtB7PKFy1SQ=
github.com/filecoin-project/go-state-types v0.14.0-rc1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY=
github.com/filecoin-project/go-state-types v0.14.0-rc2 h1:jx+iOyy7un1G6ozkvKpIBVFhs6Rg23j6NAoiIjBmGTc=
github.com/filecoin-project/go-state-types v0.14.0-rc2/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY=
github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk=
github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
@ -51,50 +51,13 @@ func decodeOutputToUint64(output []byte) (uint64, error) {
|
||||
err := binary.Read(buf, binary.BigEndian, &result)
|
||||
return result, err
|
||||
}
|
||||
func buildInputFromuint64(number uint64) []byte {
|
||||
func buildInputFromUint64(number uint64) []byte {
|
||||
// Convert the number to a binary uint64 array
|
||||
binaryNumber := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(binaryNumber, number)
|
||||
return inputDataFromArray(binaryNumber)
|
||||
}
|
||||
|
||||
// recursive delegate calls that fail due to gas limits are currently getting to 229 iterations
|
||||
// before running out of gas
|
||||
func recursiveDelegatecallFail(ctx context.Context, t *testing.T, client *kit.TestFullNode, filename string, count uint64) {
|
||||
expectedIterationsBeforeFailing := int(220)
|
||||
fromAddr, idAddr := client.EVM().DeployContractFromFilename(ctx, filename)
|
||||
t.Log("recursion count - ", count)
|
||||
inputData := buildInputFromuint64(count)
|
||||
_, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", inputData)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
result, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "totalCalls()", []byte{})
|
||||
require.NoError(t, err)
|
||||
|
||||
resultUint, err := decodeOutputToUint64(result)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotEqual(t, int(resultUint), int(count))
|
||||
require.Equal(t, expectedIterationsBeforeFailing, int(resultUint))
|
||||
}
|
||||
func recursiveDelegatecallSuccess(ctx context.Context, t *testing.T, client *kit.TestFullNode, filename string, count uint64) {
t.Log("Count - ", count)

fromAddr, idAddr := client.EVM().DeployContractFromFilename(ctx, filename)
inputData := buildInputFromuint64(count)
_, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", inputData)
require.NoError(t, err)

result, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "totalCalls()", []byte{})
require.NoError(t, err)

resultUint, err := decodeOutputToUint64(result)
require.NoError(t, err)

require.Equal(t, int(count), int(resultUint))
}

// TestFEVMRecursive does a basic fevm contract installation and invocation
func TestFEVMRecursive(t *testing.T) {
callCounts := []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100, 230, 330}
@ -107,7 +70,7 @@ func TestFEVMRecursive(t *testing.T) {
for _, callCount := range callCounts {
callCount := callCount // linter unhappy unless callCount is local to loop
t.Run(fmt.Sprintf("TestFEVMRecursive%d", callCount), func(t *testing.T) {
_, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", buildInputFromuint64(callCount))
_, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", buildInputFromUint64(callCount))
require.NoError(t, err)
})
}
@ -125,7 +88,7 @@ func TestFEVMRecursiveFail(t *testing.T) {
for _, failCallCount := range failCallCounts {
failCallCount := failCallCount // linter unhappy unless callCount is local to loop
t.Run(fmt.Sprintf("TestFEVMRecursiveFail%d", failCallCount), func(t *testing.T) {
_, wait, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", buildInputFromuint64(failCallCount))
_, wait, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", buildInputFromUint64(failCallCount))
require.Error(t, err)
require.Equal(t, exitcode.ExitCode(37), wait.Receipt.ExitCode)
})
@ -156,23 +119,51 @@ func TestFEVMRecursive2(t *testing.T) {

// TestFEVMRecursiveDelegatecallCount tests the maximum delegatecall recursion depth.
func TestFEVMRecursiveDelegatecallCount(t *testing.T) {

ctx, cancel, client := kit.SetupFEVMTest(t)
defer cancel()

highestSuccessCount := uint64(226)
// these depend on the actors bundle, may need to be adjusted with a network upgrade
const highestSuccessCount = 228
const expectedIterationsBeforeFailing = 222

filename := "contracts/RecursiveDelegeatecall.hex"
recursiveDelegatecallSuccess(ctx, t, client, filename, uint64(1))
recursiveDelegatecallSuccess(ctx, t, client, filename, uint64(2))
recursiveDelegatecallSuccess(ctx, t, client, filename, uint64(10))
recursiveDelegatecallSuccess(ctx, t, client, filename, uint64(100))
recursiveDelegatecallSuccess(ctx, t, client, filename, highestSuccessCount)

recursiveDelegatecallFail(ctx, t, client, filename, highestSuccessCount+1)
recursiveDelegatecallFail(ctx, t, client, filename, uint64(1000))
recursiveDelegatecallFail(ctx, t, client, filename, uint64(10000000))
testCases := []struct {
recursionCount uint64
expectSuccess bool
}{
// success
{1, true},
{2, true},
{10, true},
{100, true},
{highestSuccessCount, true},
// failure
{highestSuccessCount + 1, false},
{1000, false},
{10000000, false},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("recursionCount=%d,expectSuccess=%t", tc.recursionCount, tc.expectSuccess), func(t *testing.T) {
fromAddr, idAddr := client.EVM().DeployContractFromFilename(ctx, filename)
inputData := buildInputFromUint64(tc.recursionCount)
_, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "recursiveCall(uint256)", inputData)
require.NoError(t, err)

result, _, err := client.EVM().InvokeContractByFuncName(ctx, fromAddr, idAddr, "totalCalls()", []byte{})
require.NoError(t, err)

resultUint, err := decodeOutputToUint64(result)
require.NoError(t, err)

if tc.expectSuccess {
require.Equal(t, int(tc.recursionCount), int(resultUint))
} else {
require.NotEqual(t, int(resultUint), int(tc.recursionCount), "unexpected recursion count, if the actors bundle has changed, this test may need to be adjusted")
require.Equal(t, int(expectedIterationsBeforeFailing), int(resultUint))
}
})
}
}

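// Note (illustrative sketch, not part of this change): the tests above rely on a
// buildInputFromUint64 helper defined elsewhere in this test file. Its assumed behaviour
// is to ABI-encode a single uint256 argument by right-aligning the uint64 value in one
// 32-byte big-endian word, roughly:
//
//	func buildInputFromUint64(number uint64) []byte {
//		// a uint256 argument occupies a full 32-byte word; put the uint64 in the last 8 bytes
//		input := make([]byte, 32)
//		binary.BigEndian.PutUint64(input[24:], number)
//		return input
//	}
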
// TestFEVMBasic does a basic fevm contract installation and invocation
@ -585,44 +576,52 @@ func TestFEVMRecursiveActorCall(t *testing.T) {
filenameActor := "contracts/RecCall.hex"
fromAddr, actorAddr := client.EVM().DeployContractFromFilename(ctx, filenameActor)

testN := func(n, r int, ex exitcode.ExitCode) func(t *testing.T) {
return func(t *testing.T) {
inputData := make([]byte, 32*3)
binary.BigEndian.PutUint64(inputData[24:], uint64(n))
binary.BigEndian.PutUint64(inputData[32+24:], uint64(n))
binary.BigEndian.PutUint64(inputData[32+32+24:], uint64(r))
exitCodeStackOverflow := exitcode.ExitCode(37)
exitCodeTransactionReverted := exitcode.ExitCode(33)

client.EVM().InvokeContractByFuncNameExpectExit(ctx, fromAddr, actorAddr, "exec1(uint256,uint256,uint256)", inputData, ex)
}
testCases := []struct {
stackDepth int
recursionLimit int
exitCode exitcode.ExitCode
}{
{0, 1, exitcode.Ok},
{1, 1, exitcode.Ok},
{20, 1, exitcode.Ok},
{200, 1, exitcode.Ok},
{251, 1, exitcode.Ok},
{252, 1, exitCodeStackOverflow},
{0, 10, exitcode.Ok},
{1, 10, exitcode.Ok},
{20, 10, exitcode.Ok},
{200, 10, exitcode.Ok},
{251, 10, exitcode.Ok},
{252, 10, exitCodeStackOverflow},
{0, 32, exitcode.Ok},
{1, 32, exitcode.Ok},
{20, 32, exitcode.Ok},
{200, 32, exitcode.Ok},
{251, 32, exitcode.Ok},
{252, 32, exitCodeStackOverflow},
// the following are actors bundle dependent and may need to be tweaked with a network upgrade
{0, 255, exitcode.Ok},
{251, 164, exitcode.Ok},
{0, 261, exitCodeTransactionReverted},
{251, 173, exitCodeTransactionReverted},
}
for _, tc := range testCases {
var fail string
if tc.exitCode != exitcode.Ok {
fail = "-fails"
}
t.Run(fmt.Sprintf("stackDepth=%d,recursionLimit=%d%s", tc.stackDepth, tc.recursionLimit, fail), func(t *testing.T) {
inputData := make([]byte, 32*3)
binary.BigEndian.PutUint64(inputData[24:], uint64(tc.stackDepth))
binary.BigEndian.PutUint64(inputData[32+24:], uint64(tc.stackDepth))
binary.BigEndian.PutUint64(inputData[32+32+24:], uint64(tc.recursionLimit))

t.Run("n=0,r=1", testN(0, 1, exitcode.Ok))
t.Run("n=1,r=1", testN(1, 1, exitcode.Ok))
t.Run("n=20,r=1", testN(20, 1, exitcode.Ok))
t.Run("n=200,r=1", testN(200, 1, exitcode.Ok))
t.Run("n=251,r=1", testN(251, 1, exitcode.Ok))

t.Run("n=252,r=1-fails", testN(252, 1, exitcode.ExitCode(37))) // 37 means stack overflow

t.Run("n=0,r=10", testN(0, 10, exitcode.Ok))
t.Run("n=1,r=10", testN(1, 10, exitcode.Ok))
t.Run("n=20,r=10", testN(20, 10, exitcode.Ok))
t.Run("n=200,r=10", testN(200, 10, exitcode.Ok))
t.Run("n=251,r=10", testN(251, 10, exitcode.Ok))

t.Run("n=252,r=10-fails", testN(252, 10, exitcode.ExitCode(37)))

t.Run("n=0,r=32", testN(0, 32, exitcode.Ok))
t.Run("n=1,r=32", testN(1, 32, exitcode.Ok))
t.Run("n=20,r=32", testN(20, 32, exitcode.Ok))
t.Run("n=200,r=32", testN(200, 32, exitcode.Ok))
t.Run("n=251,r=32", testN(251, 32, exitcode.Ok))

t.Run("n=0,r=252", testN(0, 252, exitcode.Ok))
t.Run("n=251,r=164", testN(251, 164, exitcode.Ok))

t.Run("n=0,r=255-fails", testN(0, 255, exitcode.ExitCode(33))) // 33 means transaction reverted
t.Run("n=251,r=167-fails", testN(251, 167, exitcode.ExitCode(33)))
client.EVM().InvokeContractByFuncNameExpectExit(ctx, fromAddr, actorAddr, "exec1(uint256,uint256,uint256)", inputData, tc.exitCode)
})
}
}

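// Note (assumption, for clarity; not part of this change): exec1(uint256,uint256,uint256)
// expects three 32-byte big-endian words (96 bytes of calldata in total), so each uint64
// test value is written into the last 8 bytes of its word, i.e. at offsets 24, 32+24 and
// 32+32+24 of the inputData buffer built above.
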
// TestFEVMRecursiveActorCallEstimate

@ -260,7 +260,7 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ..
)

// Will use 2KiB sectors by default (default value of sectorSize).
proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version, false)
proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version, miner.SealProofVariant_Standard)
require.NoError(n.t, err)

// Create the preseal commitment.

@ -25,6 +25,7 @@ import (
const DefaultPresealsPerBootstrapMiner = 2

const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
const TestSptNi = abi.RegisteredSealProof_StackedDrg2KiBV1_2_Feat_NiPoRep

// nodeOpts is an options accumulating struct, where functional options are
// merged into.

@ -6,6 +6,7 @@ import (
"crypto/rand"
"fmt"
"io"
"math"
"os"
"path/filepath"
"testing"
@ -224,82 +225,6 @@ func (tm *TestUnmanagedMiner) makeAndSaveCCSector(_ context.Context, sectorNumbe
tm.cacheDirPaths[sectorNumber] = cacheDirPath
}

func (tm *TestUnmanagedMiner) OnboardSectorWithPieces(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp,
context.CancelFunc) {
req := require.New(tm.t)
sectorNumber := tm.currentSectorNum
tm.currentSectorNum++

// Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)

// Step 2: Build a sector with non 0 Pieces that we want to onboard
var pieces []abi.PieceInfo
if !tm.mockProofs {
pieces = tm.mkAndSavePiecesToOnboard(ctx, sectorNumber, proofType)
} else {
pieces = []abi.PieceInfo{{
Size: abi.PaddedPieceSize(tm.options.sectorSize),
PieceCID: cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha"),
}}
}

// Step 3: Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State
if !tm.mockProofs {
tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, pieces)
} else {
tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz")
tm.unsealedCids[sectorNumber] = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
}

// Step 4 : Submit the Pre-Commit to the network
unsealedCid := tm.unsealedCids[sectorNumber]
r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
Sectors: []miner14.SectorPreCommitInfo{{
Expiration: 2880 * 300,
SectorNumber: sectorNumber,
SealProof: TestSpt,
SealedCID: tm.sealedCids[sectorNumber],
SealRandEpoch: preCommitSealRand,
UnsealedCid: &unsealedCid,
}},
}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())

// Step 5: Generate a ProveCommit for the CC sector
waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber)

proveCommit := []byte{0xde, 0xad, 0xbe, 0xef} // mock prove commit
if !tm.mockProofs {
proveCommit = tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, pieces)
}

// Step 6: Submit the ProveCommit to the network
tm.t.Log("Submitting ProveCommitSector ...")

var manifest []miner14.PieceActivationManifest
for _, piece := range pieces {
manifest = append(manifest, miner14.PieceActivationManifest{
CID: piece.PieceCID,
Size: piece.Size,
})
}

r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: manifest}},
SectorProofs: [][]byte{proveCommit},
RequireActivationSuccess: true,
}, 1, builtin.MethodsMiner.ProveCommitSectors3)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())

tm.proofType[sectorNumber] = proofType

respCh, cancelFn := tm.wdPostLoop(ctx, sectorNumber, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
return sectorNumber, respCh, cancelFn
}

func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof) ([]abi.PieceInfo, string) {
paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
unpaddedPieceSize := paddedPieceSize.Unpadded()
@ -340,39 +265,51 @@ func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof)
return publicPieces, unsealedSectorFile.Name()
}

func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.RegisteredSealProof, sectorNumber abi.SectorNumber) {
if tm.mockProofs {
tm.t.Fatal("snap deal with mock proofs currently not supported")
func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.RegisteredSealProof, sectorNumber abi.SectorNumber) []abi.PieceInfo {
updateProofType := abi.SealProofInfos[proofType].UpdateProof
var pieces []abi.PieceInfo
var snapProof []byte
var newSealedCid cid.Cid

if !tm.mockProofs {
// generate sector key
var unsealedPath string
pieces, unsealedPath = tm.mkStagedFileWithPieces(proofType)

s, err := os.Stat(tm.sealedSectorPaths[sectorNumber])
require.NoError(tm.t, err)

randomBytes := make([]byte, s.Size())
_, err = io.ReadFull(rand.Reader, randomBytes)
require.NoError(tm.t, err)

updatePath := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size()))
require.NoError(tm.t, updatePath.Close())
updateDir := filepath.Join(tm.t.TempDir(), fmt.Sprintf("update-%d", sectorNumber))
require.NoError(tm.t, os.MkdirAll(updateDir, 0700))

var newUnsealedCid cid.Cid
newSealedCid, newUnsealedCid, err = ffi.SectorUpdate.EncodeInto(updateProofType, updatePath.Name(), updateDir,
tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber], unsealedPath, pieces)
require.NoError(tm.t, err)

vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, tm.sealedCids[sectorNumber],
newSealedCid, newUnsealedCid, updatePath.Name(), updateDir, tm.sealedSectorPaths[sectorNumber],
tm.cacheDirPaths[sectorNumber])
require.NoError(tm.t, err)

snapProof, err = ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, tm.sealedCids[sectorNumber],
newSealedCid, newUnsealedCid, vp)
require.NoError(tm.t, err)
} else {
pieces = []abi.PieceInfo{{
Size: abi.PaddedPieceSize(tm.options.sectorSize),
PieceCID: cid.MustParse("baga6ea4seaqlhznlutptgfwhffupyer6txswamerq5fc2jlwf2lys2mm5jtiaeq"),
}}
snapProof = []byte{0xde, 0xad, 0xbe, 0xef}
newSealedCid = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkieka")
}

// generate sector key
pieces, unsealedPath := tm.mkStagedFileWithPieces(proofType)
updateProofType := abi.SealProofInfos[proofType].UpdateProof

s, err := os.Stat(tm.sealedSectorPaths[sectorNumber])
require.NoError(tm.t, err)

randomBytes := make([]byte, s.Size())
_, err = io.ReadFull(rand.Reader, randomBytes)
require.NoError(tm.t, err)

updatePath := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size()))
require.NoError(tm.t, updatePath.Close())
updateDir := filepath.Join(tm.t.TempDir(), fmt.Sprintf("update-%d", sectorNumber))
require.NoError(tm.t, os.MkdirAll(updateDir, 0700))

newSealed, newUnsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, updatePath.Name(), updateDir,
tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber], unsealedPath, pieces)
require.NoError(tm.t, err)

vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, tm.sealedCids[sectorNumber],
newSealed, newUnsealed, updatePath.Name(), updateDir, tm.sealedSectorPaths[sectorNumber],
tm.cacheDirPaths[sectorNumber])
require.NoError(tm.t, err)

snapProof, err := ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, tm.sealedCids[sectorNumber],
newSealed, newUnsealed, vp)
require.NoError(tm.t, err)
tm.waitForMutableDeadline(ctx, sectorNumber)

// submit proof
@ -396,7 +333,7 @@ func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.Regist
Sector: sectorNumber,
Deadline: sl.Deadline,
Partition: sl.Partition,
NewSealedCID: newSealed,
NewSealedCID: newSealedCid,
Pieces: manifest,
},
},
@ -405,9 +342,11 @@ func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.Regist
RequireActivationSuccess: true,
RequireNotificationSuccess: false,
}
r, err := tm.submitMessage(ctx, params, 1, builtin.MethodsMiner.ProveReplicaUpdates3)
r, err := tm.SubmitMessage(ctx, params, 1, builtin.MethodsMiner.ProveReplicaUpdates3)
require.NoError(tm.t, err)
require.True(tm.t, r.Receipt.ExitCode.IsSuccess())

return pieces
}

func (tm *TestUnmanagedMiner) waitForMutableDeadline(ctx context.Context, sectorNum abi.SectorNumber) {
@ -440,66 +379,228 @@ func (tm *TestUnmanagedMiner) waitForMutableDeadline(ctx context.Context, sector
}
}

func (tm *TestUnmanagedMiner) OnboardCCSector(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp,
context.CancelFunc) {
req := require.New(tm.t)
func (tm *TestUnmanagedMiner) NextSectorNumber() abi.SectorNumber {
sectorNumber := tm.currentSectorNum
tm.currentSectorNum++
return sectorNumber
}

func (tm *TestUnmanagedMiner) PrepareSectorForProveCommit(
ctx context.Context,
proofType abi.RegisteredSealProof,
sectorNumber abi.SectorNumber,
pieces []abi.PieceInfo,
) (seedEpoch abi.ChainEpoch, proveCommit []byte) {

req := require.New(tm.t)

// Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
preCommitSealRandEpoch := tm.waitPreCommitSealRandomness(ctx, sectorNumber, proofType)

// Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State
tm.generatePreCommit(ctx, sectorNumber, preCommitSealRandEpoch, proofType, pieces)

// --------------------Create pre-commit for the CC sector -> we'll just pre-commit `sector size` worth of 0s for this CC sector

// Step 1: Wait for the pre-commitseal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
preCommitSealRand := tm.waitPreCommitSealRandomness(ctx, sectorNumber)

if !tm.mockProofs {
// Step 2: Write empty bytes that we want to seal i.e. create our CC sector
tm.makeAndSaveCCSector(ctx, sectorNumber)

// Step 3: Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State
tm.generatePreCommit(ctx, sectorNumber, preCommitSealRand, proofType, []abi.PieceInfo{})
} else {
// skip the above steps and use a mock sealed CID
tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz")
if !proofType.IsNonInteractive() {
// Submit the Pre-Commit to the network
var uc *cid.Cid
if len(pieces) > 0 {
unsealedCid := tm.unsealedCids[sectorNumber]
uc = &unsealedCid
}
r, err := tm.SubmitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
Sectors: []miner14.SectorPreCommitInfo{{
Expiration: 2880 * 300,
SectorNumber: sectorNumber,
SealProof: proofType,
SealedCID: tm.sealedCids[sectorNumber],
SealRandEpoch: preCommitSealRandEpoch,
UnsealedCid: uc,
}},
}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())
}

// Step 4 : Submit the Pre-Commit to the network
r, err := tm.submitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
Sectors: []miner14.SectorPreCommitInfo{{
Expiration: 2880 * 300,
SectorNumber: sectorNumber,
SealProof: TestSpt,
SealedCID: tm.sealedCids[sectorNumber],
SealRandEpoch: preCommitSealRand,
}},
}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())
// Generate a ProveCommit for the CC sector
var seedRandomness abi.InteractiveSealRandomness
seedEpoch, seedRandomness = tm.proveCommitWaitSeed(ctx, sectorNumber, proofType)

// Step 5: Generate a ProveCommit for the CC sector
waitSeedRandomness := tm.proveCommitWaitSeed(ctx, sectorNumber)

proveCommit := []byte{0xde, 0xad, 0xbe, 0xef} // mock prove commit
proveCommit = []byte{0xde, 0xad, 0xbe, 0xef} // mock prove commit
if !tm.mockProofs {
proveCommit = tm.generateProveCommit(ctx, sectorNumber, proofType, waitSeedRandomness, []abi.PieceInfo{})
proveCommit = tm.generateProveCommit(ctx, sectorNumber, proofType, seedRandomness, pieces)
}

return seedEpoch, proveCommit
}

func (tm *TestUnmanagedMiner) SubmitProveCommit(
ctx context.Context,
proofType abi.RegisteredSealProof,
sectorNumber abi.SectorNumber,
seedEpoch abi.ChainEpoch,
proveCommit []byte,
pieceManifest []miner14.PieceActivationManifest,
) {

req := require.New(tm.t)

if proofType.IsNonInteractive() {
req.Nil(pieceManifest, "piece manifest should be nil for Non-interactive PoRep")
}

// Step 6: Submit the ProveCommit to the network
tm.t.Log("Submitting ProveCommitSector ...")
if proofType.IsNonInteractive() {
tm.t.Log("Submitting ProveCommitSector ...")

r, err = tm.submitMessage(ctx, &miner14.ProveCommitSectors3Params{
SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber}},
SectorProofs: [][]byte{proveCommit},
RequireActivationSuccess: true,
}, 0, builtin.MethodsMiner.ProveCommitSectors3)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())
var provingDeadline uint64 = 7
if tm.IsImmutableDeadline(ctx, provingDeadline) {
// avoid immutable deadlines
provingDeadline = 5
}

actorIdNum, err := address.IDFromAddress(tm.ActorAddr)
req.NoError(err)
actorId := abi.ActorID(actorIdNum)

r, err := tm.SubmitMessage(ctx, &miner14.ProveCommitSectorsNIParams{
Sectors: []miner14.SectorNIActivationInfo{{
SealingNumber: sectorNumber,
SealerID: actorId,
SealedCID: tm.sealedCids[sectorNumber],
SectorNumber: sectorNumber,
SealRandEpoch: seedEpoch,
Expiration: 2880 * 300,
}},
AggregateProof: proveCommit,
SealProofType: proofType,
AggregateProofType: abi.RegisteredAggregationProof_SnarkPackV2,
ProvingDeadline: provingDeadline,
RequireActivationSuccess: true,
}, 1, builtin.MethodsMiner.ProveCommitSectorsNI)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())

// NI-PoRep lets us determine the deadline, so we can check that it's set correctly
sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, r.TipSet)
req.NoError(err)
req.Equal(provingDeadline, sp.Deadline)
} else {
tm.t.Log("Submitting ProveCommitSector ...")

r, err := tm.SubmitMessage(ctx, &miner14.ProveCommitSectors3Params{
SectorActivations: []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: pieceManifest}},
SectorProofs: [][]byte{proveCommit},
RequireActivationSuccess: true,
}, 0, builtin.MethodsMiner.ProveCommitSectors3)
req.NoError(err)
req.True(r.Receipt.ExitCode.IsSuccess())
}
}

func (tm *TestUnmanagedMiner) OnboardCCSector(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc) {
sectorNumber := tm.NextSectorNumber()

if !tm.mockProofs {
// Write empty bytes that we want to seal i.e. create our CC sector
tm.makeAndSaveCCSector(ctx, sectorNumber)
}

seedEpoch, proveCommit := tm.PrepareSectorForProveCommit(ctx, proofType, sectorNumber, []abi.PieceInfo{})

tm.SubmitProveCommit(ctx, proofType, sectorNumber, seedEpoch, proveCommit, nil)

tm.proofType[sectorNumber] = proofType

respCh, cancelFn := tm.wdPostLoop(ctx, sectorNumber, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])

return sectorNumber, respCh, cancelFn
}

func (tm *TestUnmanagedMiner) OnboardSectorWithPieces(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc) {
sectorNumber := tm.NextSectorNumber()

// Build a sector with non 0 Pieces that we want to onboard
var pieces []abi.PieceInfo
if !tm.mockProofs {
pieces = tm.mkAndSavePiecesToOnboard(ctx, sectorNumber, proofType)
} else {
pieces = []abi.PieceInfo{{
Size: abi.PaddedPieceSize(tm.options.sectorSize),
PieceCID: cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha"),
}}
}

_, proveCommit := tm.PrepareSectorForProveCommit(ctx, proofType, sectorNumber, pieces)

// Submit the ProveCommit to the network
tm.t.Log("Submitting ProveCommitSector ...")

var manifest []miner14.PieceActivationManifest
for _, piece := range pieces {
manifest = append(manifest, miner14.PieceActivationManifest{
CID: piece.PieceCID,
Size: piece.Size,
})
}

tm.SubmitProveCommit(ctx, proofType, sectorNumber, 0, proveCommit, manifest)

tm.proofType[sectorNumber] = proofType
respCh, cancelFn := tm.wdPostLoop(ctx, sectorNumber, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])

return sectorNumber, respCh, cancelFn
}

// calculateNextPostEpoch calculates the first epoch of the deadline proving window
// that is desired for the given sector for the specified miner.
// This function returns the current epoch and the calculated proving epoch.
func (tm *TestUnmanagedMiner) calculateNextPostEpoch(
ctx context.Context,
sectorNumber abi.SectorNumber,
) (abi.ChainEpoch, abi.ChainEpoch, error) {
// Retrieve the current blockchain head
head, err := tm.FullNode.ChainHead(ctx)
if err != nil {
return 0, 0, fmt.Errorf("failed to get chain head: %w", err)
}

// Obtain the proving deadline information for the miner
di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
if err != nil {
return 0, 0, fmt.Errorf("failed to get proving deadline: %w", err)
}

tm.t.Logf("Miner %s: WindowPoST(%d): ProvingDeadline: %+v", tm.ActorAddr, sectorNumber, di)

// Fetch the sector partition for the given sector number
sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
if err != nil {
return 0, 0, fmt.Errorf("failed to get sector partition: %w", err)
}

tm.t.Logf("Miner %s: WindowPoST(%d): SectorPartition: %+v", tm.ActorAddr, sectorNumber, sp)

// Calculate the start of the period, adjusting if the current deadline has passed
periodStart := di.PeriodStart
// calculate current deadline index because it won't be reliable from state until the first
// challenge window cron tick after first sector onboarded
currentDeadlineIdx := uint64(math.Abs(float64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow)))
if di.PeriodStart < di.CurrentEpoch && sp.Deadline <= currentDeadlineIdx {
// If the deadline has passed in the current proving period, calculate for the next period
// Note that di.Open may be > di.CurrentEpoch if the miner has just been enrolled in cron so
// their deadlines haven't started rolling yet
periodStart += di.WPoStProvingPeriod
}

// Calculate the exact epoch when proving should occur
provingEpoch := periodStart + di.WPoStChallengeWindow*abi.ChainEpoch(sp.Deadline)

tm.t.Logf("Miner %s: WindowPoST(%d): next ProvingEpoch: %d", tm.ActorAddr, sectorNumber, provingEpoch)

return di.CurrentEpoch, provingEpoch, nil
}

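// Worked example (illustrative, not part of this change): with hypothetical values
// di.PeriodStart=1000, di.CurrentEpoch=1100, di.WPoStChallengeWindow=60 and a sector
// assigned to deadline 3:
//
//	currentDeadlineIdx = (1100 - 1000) / 60 = 1      // deadline 3 has not passed yet
//	provingEpoch       = 1000 + 60*3       = 1180    // first epoch of deadline 3's window
//
// Had the sector been assigned to deadline 0 or 1, periodStart would first be advanced
// by di.WPoStProvingPeriod before computing provingEpoch.
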
func (tm *TestUnmanagedMiner) wdPostLoop(
pctx context.Context,
sectorNumber abi.SectorNumber,
@ -598,7 +699,7 @@ func (tm *TestUnmanagedMiner) SubmitPostDispute(ctx context.Context, sectorNumbe

tm.t.Logf("Miner %s: Sector %d - Disputing WindowedPoSt to confirm validity at epoch %d", tm.ActorAddr, sectorNumber, disputeEpoch)

_, err = tm.submitMessage(ctx, &miner14.DisputeWindowedPoStParams{
_, err = tm.SubmitMessage(ctx, &miner14.DisputeWindowedPoStParams{
Deadline: sp.Deadline,
PoStIndex: 0,
}, 1, builtin.MethodsMiner.DisputeWindowedPoSt)
@ -651,7 +752,7 @@ func (tm *TestUnmanagedMiner) submitWindowPost(ctx context.Context, sectorNumber
return fmt.Errorf("Miner(%s): failed to get miner info for sector %d: %w", tm.ActorAddr, sectorNumber, err)
}

r, err := tm.submitMessage(ctx, &miner14.SubmitWindowedPoStParams{
r, err := tm.SubmitMessage(ctx, &miner14.SubmitWindowedPoStParams{
ChainCommitEpoch: chainRandomnessEpoch,
ChainCommitRand: chainRandomness,
Deadline: sp.Deadline,
@ -754,12 +855,16 @@ func (tm *TestUnmanagedMiner) generateWindowPost(

return proofBytes, nil
}
func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, sectorNumber abi.SectorNumber) abi.ChainEpoch {
func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, sectorNumber abi.SectorNumber, proofType abi.RegisteredSealProof) abi.ChainEpoch {
// We want to draw seal randomness from a tipset that has already achieved finality as PreCommits are expensive to re-generate.
// Check if we already have an epoch that is already final and wait for such an epoch if we don't have one.
head, err := tm.FullNode.ChainHead(ctx)
require.NoError(tm.t, err)

if proofType.IsNonInteractive() {
return head.Height() - 1 // no need to wait
}

var sealRandEpoch abi.ChainEpoch
if head.Height() > policy.SealRandomnessLookback {
sealRandEpoch = head.Height() - policy.SealRandomnessLookback
@ -775,53 +880,6 @@ func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, s
return sealRandEpoch
}

// calculateNextPostEpoch calculates the first epoch of the deadline proving window
// that is desired for the given sector for the specified miner.
// This function returns the current epoch and the calculated proving epoch.
func (tm *TestUnmanagedMiner) calculateNextPostEpoch(
ctx context.Context,
sectorNumber abi.SectorNumber,
) (abi.ChainEpoch, abi.ChainEpoch, error) {
// Retrieve the current blockchain head
head, err := tm.FullNode.ChainHead(ctx)
if err != nil {
return 0, 0, fmt.Errorf("failed to get chain head: %w", err)
}

// Obtain the proving deadline information for the miner
di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
if err != nil {
return 0, 0, fmt.Errorf("failed to get proving deadline: %w", err)
}

tm.t.Logf("Miner %s: WindowPoST(%d): ProvingDeadline: %+v", tm.ActorAddr, sectorNumber, di)

// Fetch the sector partition for the given sector number
sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
if err != nil {
return 0, 0, fmt.Errorf("failed to get sector partition: %w", err)
}

tm.t.Logf("Miner %s: WindowPoST(%d): SectorPartition: %+v", tm.ActorAddr, sectorNumber, sp)

// Calculate the start of the period, adjusting if the current deadline has passed
periodStart := di.PeriodStart
// calculate current deadline index because it won't be reliable from state until the first
// challenge window cron tick after first sector onboarded
curIdx := (di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow
if di.PeriodStart < di.CurrentEpoch && sp.Deadline <= uint64(curIdx) {
// If the deadline has passed in the current proving period, calculate for the next period
periodStart += di.WPoStProvingPeriod
}

// Calculate the exact epoch when proving should occur
provingEpoch := periodStart + di.WPoStChallengeWindow*abi.ChainEpoch(sp.Deadline)

tm.t.Logf("Miner %s: WindowPoST(%d): next ProvingEpoch: %d", tm.ActorAddr, sectorNumber, provingEpoch)

return di.CurrentEpoch, provingEpoch, nil
}

func (tm *TestUnmanagedMiner) generatePreCommit(
ctx context.Context,
sectorNumber abi.SectorNumber,
@ -829,6 +887,15 @@ func (tm *TestUnmanagedMiner) generatePreCommit(
proofType abi.RegisteredSealProof,
pieceInfo []abi.PieceInfo,
) {

if tm.mockProofs {
tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz")
if len(pieceInfo) > 0 {
tm.unsealedCids[sectorNumber] = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
}
return
}

req := require.New(tm.t)
tm.t.Logf("Miner %s: Generating proof type %d PreCommit for sector %d...", tm.ActorAddr, proofType, sectorNumber)

@ -878,41 +945,48 @@ func (tm *TestUnmanagedMiner) generatePreCommit(
tm.unsealedCids[sectorNumber] = unsealedCid
}

func (tm *TestUnmanagedMiner) proveCommitWaitSeed(ctx context.Context, sectorNumber abi.SectorNumber) abi.InteractiveSealRandomness {
func (tm *TestUnmanagedMiner) proveCommitWaitSeed(ctx context.Context, sectorNumber abi.SectorNumber, proofType abi.RegisteredSealProof) (abi.ChainEpoch, abi.InteractiveSealRandomness) {
req := require.New(tm.t)
head, err := tm.FullNode.ChainHead(ctx)
req.NoError(err)

tm.t.Logf("Miner %s: Fetching pre-commit info for sector %d...", tm.ActorAddr, sectorNumber)
preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(ctx, tm.ActorAddr, sectorNumber, head.Key())
req.NoError(err)
seedRandomnessHeight := preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay()
var seedRandomnessHeight abi.ChainEpoch

tm.t.Logf("Miner %s: Waiting %d epochs for seed randomness at epoch %d (current epoch %d) for sector %d...", tm.ActorAddr, seedRandomnessHeight-head.Height(), seedRandomnessHeight, head.Height(), sectorNumber)
tm.FullNode.WaitTillChain(ctx, HeightAtLeast(seedRandomnessHeight+5))
if proofType.IsNonInteractive() {
seedRandomnessHeight = head.Height() - 1 // no need to wait, it just can't be current epoch
} else {
tm.t.Logf("Miner %s: Fetching pre-commit info for sector %d...", tm.ActorAddr, sectorNumber)
preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(ctx, tm.ActorAddr, sectorNumber, head.Key())
req.NoError(err)
seedRandomnessHeight = preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay()

tm.t.Logf("Miner %s: Waiting %d epochs for seed randomness at epoch %d (current epoch %d) for sector %d...", tm.ActorAddr, seedRandomnessHeight-head.Height(), seedRandomnessHeight, head.Height(), sectorNumber)
tm.FullNode.WaitTillChain(ctx, HeightAtLeast(seedRandomnessHeight+5))

head, err = tm.FullNode.ChainHead(ctx)
req.NoError(err)
}

minerAddrBytes := new(bytes.Buffer)
req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes))

head, err = tm.FullNode.ChainHead(ctx)
req.NoError(err)

tm.t.Logf("Miner %s: Fetching seed randomness for sector %d...", tm.ActorAddr, sectorNumber)
rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, seedRandomnessHeight, minerAddrBytes.Bytes(), head.Key())
req.NoError(err)
seedRandomness := abi.InteractiveSealRandomness(rand)

tm.t.Logf("Miner %s: Obtained seed randomness for sector %d: %x", tm.ActorAddr, sectorNumber, seedRandomness)
return seedRandomness
return seedRandomnessHeight, seedRandomness
}

func (tm *TestUnmanagedMiner) generateProveCommit(
ctx context.Context,
_ context.Context,
sectorNumber abi.SectorNumber,
proofType abi.RegisteredSealProof,
seedRandomness abi.InteractiveSealRandomness,
pieces []abi.PieceInfo,
) []byte {

tm.t.Logf("Miner %s: Generating proof type %d Sector Proof for sector %d...", tm.ActorAddr, proofType, sectorNumber)
req := require.New(tm.t)

@ -938,20 +1012,43 @@ func (tm *TestUnmanagedMiner) generateProveCommit(

tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber)

sectorProof, err := ffi.SealCommitPhase2(scp1, sectorNumber, actorId)
req.NoError(err)
var sectorProof []byte

if proofType.IsNonInteractive() {
circuitProofs, err := ffi.SealCommitPhase2CircuitProofs(scp1, sectorNumber)
req.NoError(err)
asvpai := proof.AggregateSealVerifyProofAndInfos{
Miner: actorId,
SealProof: proofType,
AggregateProof: abi.RegisteredAggregationProof_SnarkPackV2,
Infos: []proof.AggregateSealVerifyInfo{{
Number: sectorNumber,
Randomness: tm.sealTickets[sectorNumber],
InteractiveRandomness: make([]byte, 32),
SealedCID: tm.sealedCids[sectorNumber],
UnsealedCID: tm.unsealedCids[sectorNumber],
}},
}
tm.t.Logf("Miner %s: Aggregating circuit proofs for sector %d: %+v", tm.ActorAddr, sectorNumber, asvpai)
sectorProof, err = ffi.AggregateSealProofs(asvpai, [][]byte{circuitProofs})
req.NoError(err)
} else {
sectorProof, err = ffi.SealCommitPhase2(scp1, sectorNumber, actorId)
req.NoError(err)
}

tm.t.Logf("Miner %s: Got proof type %d sector proof of length %d for sector %d", tm.ActorAddr, proofType, len(sectorProof), sectorNumber)

return sectorProof
}

func (tm *TestUnmanagedMiner) submitMessage(
func (tm *TestUnmanagedMiner) SubmitMessage(
ctx context.Context,
params cbg.CBORMarshaler,
value uint64,
method abi.MethodNum,
) (*api.MsgLookup, error) {

enc, aerr := actors.SerializeParams(params)
if aerr != nil {
return nil, aerr
@ -1034,14 +1131,10 @@ func (tm *TestUnmanagedMiner) WaitTillActivatedAndAssertPower(
// Miner B should now have power
tm.AssertPower(ctx, uint64(tm.options.sectorSize), uint64(tm.options.sectorSize))

if tm.mockProofs {
// WindowPost Dispute should succeed as we are using mock proofs
err := tm.SubmitPostDispute(ctx, sector)
require.NoError(tm.t, err)
} else {
if !tm.mockProofs {
// WindowPost Dispute should fail
tm.AssertDisputeFails(ctx, sector)
}
} // else it would pass, which we don't want
}

func (tm *TestUnmanagedMiner) AssertDisputeFails(ctx context.Context, sector abi.SectorNumber) {
@ -1050,3 +1143,11 @@ func (tm *TestUnmanagedMiner) AssertDisputeFails(ctx context.Context, sector abi
require.Contains(tm.t, err.Error(), "failed to dispute valid post")
require.Contains(tm.t, err.Error(), "(RetCode=16)")
}

func (tm *TestUnmanagedMiner) IsImmutableDeadline(ctx context.Context, deadlineIndex uint64) bool {
di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK)
require.NoError(tm.t, err)
// don't rely on di.Index because if we haven't enrolled in cron it won't be ticking
currentDeadline := uint64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow)
return currentDeadline == deadlineIndex || currentDeadline == deadlineIndex-1
}

@ -9,6 +9,12 @@ import (
// and set to value "1" to enable running expensive tests outside of CI.
const EnvRunExpensiveTests = "LOTUS_RUN_EXPENSIVE_TESTS"

// EnvRunVeryExpensiveTests is the environment variable that needs to be present
// and set to value "1" to enable running very expensive tests outside of CI.
// A "very expensive" test is one that is expected to take too long to run in
// a standard CI setup, and should be skipped unless explicitly enabled.
const EnvRunVeryExpensiveTests = "LOTUS_RUN_VERY_EXPENSIVE_TESTS"

// Expensive marks a test as expensive, skipping it immediately if not running an
func Expensive(t *testing.T) {
switch {
@ -18,3 +24,10 @@ func Expensive(t *testing.T) {
t.Skipf("skipping expensive test outside of CI; enable by setting env var %s=1", EnvRunExpensiveTests)
}
}

// VeryExpensive marks a test as very expensive, skipping it immediately if not running an
func VeryExpensive(t *testing.T) {
if os.Getenv(EnvRunVeryExpensiveTests) != "1" {
t.Skipf("skipping VERY expensive test outside of CI; enable by setting env var %s=1", EnvRunVeryExpensiveTests)
}
}

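// Example invocation (assumed usage, not part of this change): to opt in to the
// real-proofs NI-PoRep integration test locally via the new environment variable:
//
//	LOTUS_RUN_VERY_EXPENSIVE_TESTS=1 go test ./itests -run TestManualNISectorOnboarding/WithRealProofs -v
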
@ -8,24 +8,28 @@ import (
"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/itests/kit"
)

// Manually onboard CC sectors, bypassing lotus-miner onboarding pathways
func TestManualSectorOnboarding(t *testing.T) {
const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB

req := require.New(t)

const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB
sealProofType, err := miner.SealProofTypeFromSectorSize(defaultSectorSize, network.Version23, miner.SealProofVariant_Standard)
req.NoError(err)

for _, withMockProofs := range []bool{true, false} {
testName := "WithRealProofs"
if withMockProofs {
testName = "WithMockProofs"
}
t.Run(testName, func(t *testing.T) {
if testName == "WithRealProofs" {
if !withMockProofs {
kit.Expensive(t)
}
kit.QuietMiningLogs()
@ -85,7 +89,7 @@ func TestManualSectorOnboarding(t *testing.T) {
var bRespCh chan kit.WindowPostResp
var bWdPostCancelF context.CancelFunc

bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSector(ctx, kit.TestSpt)
bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSector(ctx, sealProofType)
// Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it
minerB.AssertNoPower(ctx)
// Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector
@ -106,13 +110,9 @@ func TestManualSectorOnboarding(t *testing.T) {
minerC.WaitTillActivatedAndAssertPower(ctx, cRespCh, cSectorNum)

// Miner B has activated the CC sector -> upgrade it with snapdeals
// Note: We can't activate a sector with mock proofs as the WdPost is successfully disputed and so no point
// in snapping it as snapping is only for activated sectors
if !withMockProofs {
minerB.SnapDeal(ctx, kit.TestSpt, bSectorNum)
// cancel the WdPost for the CC sector as the corresponding CommR is no longer valid
bWdPostCancelF()
}
_ = minerB.SnapDeal(ctx, kit.TestSpt, bSectorNum)
// cancel the WdPost for the CC sector as the corresponding CommR is no longer valid
bWdPostCancelF()
})
}
}

@ -7,6 +7,7 @@ import (
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
@ -17,6 +18,7 @@ import (
miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
power11 "github.com/filecoin-project/go-state-types/builtin/v11/power"
adt11 "github.com/filecoin-project/go-state-types/builtin/v11/util/adt"
account "github.com/filecoin-project/go-state-types/builtin/v14/account"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
migration "github.com/filecoin-project/go-state-types/builtin/v9/migration/test"
miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
@ -302,7 +304,7 @@ func TestMigrationNV17(t *testing.T) {
minerInfo, err := testClient.StateMinerInfo(ctx, testMiner.ActorAddr, types.EmptyTSK)
require.NoError(t, err)

spt, err := miner.SealProofTypeFromSectorSize(minerInfo.SectorSize, network.Version17, false)
spt, err := miner.SealProofTypeFromSectorSize(minerInfo.SectorSize, network.Version17, miner.SealProofVariant_Standard)
require.NoError(t, err)

preCommitParams := miner9.PreCommitSectorParams{
@ -828,3 +830,70 @@ func TestMigrationNV21(t *testing.T) {
//todo @zen Direct data onboarding tests

}

func TestMigrationNV23(t *testing.T) {
kit.QuietMiningLogs()
f090Addr, err := address.NewIDAddress(90)
require.NoError(t, err)
nv23epoch := abi.ChainEpoch(100)
testClient, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
kit.UpgradeSchedule(stmgr.Upgrade{
Network: network.Version22,
Height: -1,
}, stmgr.Upgrade{
Network: network.Version23,
Height: nv23epoch,
Migration: filcns.UpgradeActorsV14,
},
))

ens.InterconnectAll().BeginMining(10 * time.Millisecond)

clientApi := testClient.FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

testClient.WaitTillChain(ctx, kit.HeightAtLeast(nv23epoch+5))

bs := blockstore.NewAPIBlockstore(testClient)
ctxStore := gstStore.WrapBlockStore(ctx, bs)

preMigrationTs, err := clientApi.ChainGetTipSetByHeight(ctx, nv23epoch-1, types.EmptyTSK)
require.NoError(t, err)

root := preMigrationTs.Blocks()[0].ParentStateRoot
preStateTree, err := state.LoadStateTree(ctxStore, root)
require.NoError(t, err)
require.Equal(t, types.StateTreeVersion5, preStateTree.Version())

// Check f090 actor before migration
msigCodeNv22, ok := actors.GetActorCodeID(actorstypes.Version13, manifest.MultisigKey)
assert.True(t, ok)
f090ActorPre, err := preStateTree.GetActor(f090Addr)
require.NoError(t, err)
require.True(t, f090ActorPre.Code.Equals(msigCodeNv22))

// Get state after the migration
postMigrationTs, err := clientApi.ChainHead(ctx)
require.NoError(t, err)
postStateTree, err := state.LoadStateTree(ctxStore, postMigrationTs.Blocks()[0].ParentStateRoot)
require.NoError(t, err)

// Check the new system actor
systemAct, err := postStateTree.GetActor(builtin.SystemActorAddr)
require.NoError(t, err)
systemCode, ok := actors.GetActorCodeID(actorstypes.Version14, manifest.SystemKey)
require.True(t, ok)
require.Equal(t, systemCode, systemAct.Code)

// Check f090 actor after migration
f090ActorPost, err := postStateTree.GetActor(f090Addr)
require.NoError(t, err)
accountNV23, ok := actors.GetActorCodeID(actorstypes.Version14, manifest.AccountKey)
assert.True(t, ok)
require.True(t, f090ActorPost.Code.Equals(accountNV23))
f090StatePost, err := clientApi.StateReadState(ctx, f090Addr, types.EmptyTSK)
require.NoError(t, err)
state := f090StatePost.State.(*account.State)
require.Equal(t, state.Address, f090Addr)
}

260
itests/niporep_manual_test.go
Normal file
260
itests/niporep_manual_test.go
Normal file
@ -0,0 +1,260 @@
package itests

import (
"context"
"fmt"
"math"
"testing"
"time"

"github.com/ipfs/go-cid"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagcbor"
"github.com/ipld/go-ipld-prime/datamodel"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
"github.com/ipld/go-ipld-prime/node/basicnode"
"github.com/multiformats/go-multicodec"
"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin"
miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/lib/must"
)

func TestManualNISectorOnboarding(t *testing.T) {
req := require.New(t)

const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB
sealProofType, err := miner.SealProofTypeFromSectorSize(defaultSectorSize, network.Version23, miner.SealProofVariant_NonInteractive)
req.NoError(err)

for _, withMockProofs := range []bool{true, false} {
testName := "WithRealProofs"
if withMockProofs {
testName = "WithMockProofs"
}
t.Run(testName, func(t *testing.T) {
if !withMockProofs {
kit.VeryExpensive(t)
}
kit.QuietMiningLogs()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var (
// need to pick a blocktime so that the test is not racy on CI by running through its WindowPost deadlines too fast
blocktime = 2 * time.Millisecond
client kit.TestFullNode
minerA kit.TestMiner // A is a standard genesis miner
)

// Setup and begin mining with a single miner (A)
// Miner A will only be a genesis Miner with power allocated in the genesis block and will not onboard any sectors from here on
ens := kit.NewEnsemble(t, kit.MockProofs(withMockProofs)).
FullNode(&client, kit.SectorSize(defaultSectorSize)).
// preseal more than the default number of sectors to ensure that the genesis miner has power
// because our unmanaged miners won't produce blocks so we may get null rounds
Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()).
Start().
InterconnectAll()
blockMiners := ens.BeginMiningMustPost(blocktime)
req.Len(blockMiners, 1)
blockMiner := blockMiners[0]

// Instantiate MinerB to manually handle sector onboarding and power acquisition through sector activation.
// Unlike other miners managed by the Lotus Miner storage infrastructure, MinerB operates independently,
// performing all related tasks manually. Managed by the TestKit, MinerB has the capability to utilize actual proofs
// for the processes of sector onboarding and activation.
nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)}
minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...)

ens.Start()

build.Clock.Sleep(time.Second)

t.Log("Checking initial power ...")

// Miner A should have power as it has already onboarded sectors in the genesis block
head, err := client.ChainHead(ctx)
req.NoError(err)
p, err := client.StateMinerPower(ctx, minerA.ActorAddr, head.Key())
req.NoError(err)
t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String())

// Miner B should have no power as it has yet to onboard and activate any sectors
minerB.AssertNoPower(ctx)

// Verify that ProveCommitSectorsNI rejects messages with invalid parameters
verifyProveCommitSectorsNIErrorConditions(ctx, t, minerB, sealProofType)

// ---- Miner B onboards a CC sector
var bSectorNum abi.SectorNumber
var bRespCh chan kit.WindowPostResp
var bWdPostCancelF context.CancelFunc

// Onboard a CC sector with Miner B using NI-PoRep
bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSector(ctx, sealProofType)
// Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it
minerB.AssertNoPower(ctx)

// Check that the sector-activated event was emitted
{
expectedEntries := []types.EventEntry{
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))},
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bSectorNum)), dagcbor.Encode))},
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(datamodel.Null, dagcbor.Encode))},
}
from := head.Height()
recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from})
req.NoError(err)
req.Len(recentEvents, 1)
req.Equal(expectedEntries, recentEvents[0].Entries)
}

// Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector
blockMiner.WatchMinerForPost(minerB.ActorAddr)

// Wait till both miners' sectors have had their first post and are activated and check that this is reflected in miner power
minerB.WaitTillActivatedAndAssertPower(ctx, bRespCh, bSectorNum)

head, err = client.ChainHead(ctx)
req.NoError(err)

// Miner B has activated the CC sector -> upgrade it with snapdeals
snapPieces := minerB.SnapDeal(ctx, kit.TestSpt, bSectorNum)
// cancel the WdPost for the CC sector as the corresponding CommR is no longer valid
bWdPostCancelF()

// Check "sector-updated" event happned after snap
{
expectedEntries := []types.EventEntry{
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-updated"), dagcbor.Encode))},
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bSectorNum)), dagcbor.Encode))},
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))},
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))},
{Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(snapPieces[0].Size)), dagcbor.Encode))},
}
from := head.Height()
recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from})
req.NoError(err)
req.Len(recentEvents, 1)
req.Equal(expectedEntries, recentEvents[0].Entries)
}
})
}
}

func verifyProveCommitSectorsNIErrorConditions(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sealProofType abi.RegisteredSealProof) {
req := require.New(t)

head, err := miner.FullNode.ChainHead(ctx)
req.NoError(err)

actorIdNum, err := address.IDFromAddress(miner.ActorAddr)
req.NoError(err)
actorId := abi.ActorID(actorIdNum)

var provingDeadline uint64 = 7
if miner.IsImmutableDeadline(ctx, provingDeadline) {
// avoid immutable deadlines
provingDeadline = 5
}

submitAndFail := func(params *miner14.ProveCommitSectorsNIParams, errMsg string, errCode int) {
t.Helper()
r, err := miner.SubmitMessage(ctx, params, 1, builtin.MethodsMiner.ProveCommitSectorsNI)
req.Error(err)
req.Contains(err.Error(), errMsg)
if errCode > 0 {
req.Contains(err.Error(), fmt.Sprintf("(RetCode=%d)", errCode))
}
req.Nil(r)
}

sn := abi.SectorNumber(5000)
mkSai := func() miner14.SectorNIActivationInfo {
sn++
return miner14.SectorNIActivationInfo{
SealingNumber: sn,
SealerID: actorId,
SealedCID: cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz"),
SectorNumber: sn,
SealRandEpoch: head.Height() - 10,
Expiration: 2880 * 300,
}
}
mkParams := func() miner14.ProveCommitSectorsNIParams {
return miner14.ProveCommitSectorsNIParams{
Sectors: []miner14.SectorNIActivationInfo{mkSai(), mkSai()},
AggregateProof: []byte{0xca, 0xfe, 0xbe, 0xef},
SealProofType: sealProofType,
AggregateProofType: abi.RegisteredAggregationProof_SnarkPackV2,
ProvingDeadline: provingDeadline,
RequireActivationSuccess: true,
}
}

// Test message rejection on no sectors
params := mkParams()
params.Sectors = []miner14.SectorNIActivationInfo{}
submitAndFail(&params, "too few sectors", 16)

// Test message rejection on too many sectors
sectorInfos := make([]miner14.SectorNIActivationInfo, 66)
for i := range sectorInfos {
sectorInfos[i] = mkSai()
}
params = mkParams()
params.Sectors = sectorInfos
submitAndFail(&params, "too many sectors", 16)

// Test bad aggregation proof type
params = mkParams()
params.AggregateProofType = abi.RegisteredAggregationProof_SnarkPackV1
submitAndFail(&params, "aggregate proof type", 16)

// Test bad SealerID
params = mkParams()
params.Sectors[1].SealerID = 1234
submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)

// Test bad SealingNumber
params = mkParams()
params.Sectors[1].SealingNumber = 1234
submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)

// Test bad SealedCID
params = mkParams()
params.Sectors[1].SealedCID = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)

// Test bad SealRandEpoch
head, err = miner.FullNode.ChainHead(ctx)
req.NoError(err)
params = mkParams()
params.Sectors[1].SealRandEpoch = head.Height() + builtin.EpochsInDay
submitAndFail(&params, fmt.Sprintf("seal challenge epoch %d must be before now", params.Sectors[1].SealRandEpoch), 16)
params.Sectors[1].SealRandEpoch = head.Height() - 190*builtin.EpochsInDay
submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)

// Immutable/bad deadlines
di, err := miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, head.Key())
req.NoError(err)
currentDeadlineIdx := uint64(math.Abs(float64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow)))
req.Less(currentDeadlineIdx, di.WPoStPeriodDeadlines)
params = mkParams()
params.ProvingDeadline = currentDeadlineIdx
submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx), 18)
params.ProvingDeadline = currentDeadlineIdx + 1
submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx+1), 18)
params.ProvingDeadline = di.WPoStPeriodDeadlines // too big
submitAndFail(&params, fmt.Sprintf("proving deadline index %d invalid", di.WPoStPeriodDeadlines), 16)
}
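
// Sketch (assumption, not part of this change): the event entry values asserted above are
// DAG-CBOR encoded IPLD nodes, so a reader inspecting a types.EventEntry (here called
// "entry", a hypothetical variable) could decode one roughly like this:
//
//	node, err := ipld.Decode(entry.Value, dagcbor.Decode) // returns a datamodel.Node
//	if err == nil && node.Kind() == datamodel.Kind_String {
//		s, _ := node.AsString() // e.g. "sector-activated" for the "$type" entry
//		_ = s
//	}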