diff --git a/.circleci/config.yml b/.circleci/config.yml index 4a69a4a49..1c77c2128 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -735,6 +735,45 @@ jobs: - packer/build: template: tools/packer/lotus.pkr.hcl args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + publish-dockerhub: + description: publish to dockerhub + machine: + image: ubuntu-2004:202010-01 + parameters: + tag: + type: string + default: latest + steps: + - checkout + - run: + name: dockerhub login + command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin + - run: + name: docker build + command: | + docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus . + if [[ ! -z $CIRCLE_SHA1 ]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus . + fi + if [[ ! -z $CIRCLE_TAG ]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus . + fi + - run: + name: docker push + command: | + docker push filecoin/lotus:<< parameters.tag >> + docker push filecoin/lotus-all-in-one:<< parameters.tag >> + if [[ ! -z $CIRCLE_SHA1 ]]; then + docker push filecoin/lotus:$CIRCLE_SHA1 + docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1 + fi + if [[ ! -z $CIRCLE_TAG ]]; then + docker push filecoin/lotus:$CIRCLE_TAG + docker push filecoin/lotus-all-in-one:$CIRCLE_TAG + fi workflows: version: 2.1 @@ -1017,6 +1056,16 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-dockerhub: + name: publish-dockerhub + tag: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ nightly: triggers: @@ -1030,3 +1079,6 @@ workflows: - publish-snapcraft: name: publish-snapcraft-nightly channel: edge + - publish-dockerhub: + name: publish-dockerhub-nightly + tag: nightly diff --git a/.circleci/template.yml b/.circleci/template.yml index fb59f23ea..27036ab26 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -735,6 +735,45 @@ jobs: - packer/build: template: tools/packer/lotus.pkr.hcl args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG" + publish-dockerhub: + description: publish to dockerhub + machine: + image: ubuntu-2004:202010-01 + parameters: + tag: + type: string + default: latest + steps: + - checkout + - run: + name: dockerhub login + command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin + - run: + name: docker build + command: | + docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus . + if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus . + fi + if [["[[ ! -z $CIRCLE_TAG ]]"]]; then + docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus . + docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus . 
+ fi + - run: + name: docker push + command: | + docker push filecoin/lotus:<< parameters.tag >> + docker push filecoin/lotus-all-in-one:<< parameters.tag >> + if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then + docker push filecoin/lotus:$CIRCLE_SHA1 + docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1 + fi + if [["[[ ! -z $CIRCLE_TAG ]]"]]; then + docker push filecoin/lotus:$CIRCLE_TAG + docker push filecoin/lotus-all-in-one:$CIRCLE_TAG + fi workflows: version: 2.1 @@ -887,6 +926,16 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ + - publish-dockerhub: + name: publish-dockerhub + tag: stable + filters: + branches: + ignore: + - /.*/ + tags: + only: + - /^v\d+\.\d+\.\d+(-rc\d+)?$/ nightly: triggers: @@ -900,3 +949,6 @@ workflows: - publish-snapcraft: name: publish-snapcraft-nightly channel: edge + - publish-dockerhub: + name: publish-dockerhub-nightly + tag: nightly diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 23c7640b7..000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: Bug Report -about: Create a report to help us improve -title: "[BUG] " -labels: hint/needs-triaging, kind/bug -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -**Describe the bug** -A clear and concise description of what the bug is. -(If you are not sure what the bug is, try to figure it out via a [discussion](https://github.com/filecoin-project/lotus/discussions/new) first! - -**Version (run `lotus version`):** - -**To Reproduce** -Steps to reproduce the behavior: -1. Run '...' -2. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Logs** -Provide daemon/miner/worker logs, and goroutines(if available) for troubleshooting. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..7876715e2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,92 @@ +name: "Bug Report" +description: "File a bug report to help us improve" +labels: [need/triage, kind/bug] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a security-related bug/issue. If it is, please follow please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). + required: true + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. + required: true + - label: This is **not** an enhancement request. If it is, please file a [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead. 
+ required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. + required: true + - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC(release canadiate) for the upcoming release or the dev branch(master), or have an issue updating to any of these. + required: true + - label: I did not make any code changes to lotus. + required: false +- type: dropdown + id: component-and-area + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are filing a bug for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: version + attributes: + label: Lotus Version + description: Enter the output of `lotus version` and `lotus-miner version` if applicable. + placeholder: | + e.g. + Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0 + Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty + validations: + reuiqred: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doding when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + * For sealing issues, include the output of `lotus-miner sectors status --log ` for the failed sector(s). + * For proving issues, include the output of `lotus-miner proving` info. + * For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. + render: bash + validations: + required: true +- type: textarea + id: extraInfo + attributes: + label: Logging Information + description: | + Please provide debug logs of the problem, remember you can get set log level control for: + * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#log-level-control). + * lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before furthur diagnosing the problem. + render: bash + validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... 
+ render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/deal-making-issues.md b/.github/ISSUE_TEMPLATE/deal-making-issues.md deleted file mode 100644 index bec800cb7..000000000 --- a/.github/ISSUE_TEMPLATE/deal-making-issues.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: Deal Making Issues -about: Create a report for help with deal making failures. -title: "[Deal Making Issue]" -labels: hint/needs-triaging, area/markets -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "deal making failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Basic Information** -Including information like, Are you the client or the miner? Is this a storage deal or a retrieval deal? Is it an offline deal? - -**Describe the problem** - -A brief description of the problem you encountered while trying to make a deal. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner(if applicable) and daemon setup, i.e: What hardware do you use, how much ram and etc. - -**To Reproduce** - Steps to reproduce the behavior: - 1. Run '...' - 2. See error - -**Deal status** - -The output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question. - -**Lotus daemon and miner logs** - -Please go through the logs of your daemon and miner(if applicable), and include screenshots of any error/warning-like messages you find. - -Alternatively please upload full log files and share a link here - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/enhancement.yml b/.github/ISSUE_TEMPLATE/enhancement.yml new file mode 100644 index 000000000..7320fa5c5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.yml @@ -0,0 +1,44 @@ +name: Enhancement +description: Suggest an improvement to an existing lotus feature. +labels: [need/triage, kind/enhancement] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to create an improvement suggestion! + options: + - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). + required: true + - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead. + required: true + - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. + required: true + - label: I **have** a specific, actionable, and well motivated improvement to propose. 
+ required: true +- type: dropdown + id: component + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are propoing improvement for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: request + attributes: + label: Improvement Suggestion + description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement? + placeholder: Ex. Currently lotus... However, as a storage provider, I'd like... + validations: + required: true + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 0803a6db8..000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature Request]" -labels: hint/needs-triaging -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..5cb39b0d5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,63 @@ +name: Feature request +description: Suggest an idea for lotus +labels: [need/triage, kind/feature] +body: +- type: checkboxes + attributes: + label: Checklist + description: Please check off the following boxes before continuing to create a new feature request! + options: + - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md). + required: true + - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`. + required: true + - label: I **have** a specific, actionable, and well motivated feature request to propose. + required: true +- type: dropdown + id: component + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are requesting a new feature for + options: + - lotus daemon - chain sync + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus miner/market - storage deal + - lotus miner/market - retrieval deal + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: request + attributes: + label: What is the motivation behind this feature request? Is your feature request related to a problem? Please describe. 
+ description: A clear and concise description of what the motivation or the problem is. + placeholder: Ex. I'm always frustrated when [...] + validations: + required: true +- type: textarea + id: solution + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + validations: + required: true +- type: textarea + id: alternates + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: false +- type: textarea + id: extra + attributes: + label: Additional context + description: Add any other context, design docs or screenshots about the feature request here. + validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml b/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml new file mode 100644 index 000000000..4402e97da --- /dev/null +++ b/.github/ISSUE_TEMPLATE/m1_bug_report_deal.yml @@ -0,0 +1,91 @@ +name: "M1 Bug Report For Deal Making" +description: "File a bug report around deal making for the M1 releases" +labels: [need/triage, kind/bug, M1-release] +body: +- type: checkboxes + id: checklist + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: I **am** reporting a bug w.r.t one of the [M1 tags](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043951). If not, choose another issue option [here](https://github.com/filecoin-project/lotus/issues/new/choose). + required: true + - label: I **am** reporting a bug around deal making. If not, create a [M1 Bug Report For Non Deal Making Issue](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fbug%2CM1-release&template=m1_bug_report_non_deal.yml). + required: true + - label: I have my log level set as instructed [here](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043678) and have logs available for troubleshooting. + required: true + - label: The deal is coming from one of the M1 clients(communitcated in the coordination slack channel). + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. + required: true +- type: dropdown + id: lotus-componets + validations: + required: true + attributes: + label: Lotus Component + description: Please select the lotus component you are filing a bug for + options: + - lotus miner market subsystem - storage deal + - lotus miner market subsystem - retrieval deal + - lotus miner - storage deal + - lotus miner - retrieval deal +- type: textarea + id: version + attributes: + label: Lotus Tag and Version + description: Enter the lotus tag, output of `lotus version` and `lotus-miner version`. 
+ validations: + reuiqred: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doding when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + render: bash + validations: + required: true +- type: textarea + id: deal-status + attributes: + label: Deal Status + description: What's the status of the deal? + placeholder: | + Please share the output of `lotus-miner storage-deals|retrieval-deals list [-v]` commands for the deal(s) in question. + validations: + required: true +- type: textarea + id: data-transfer-status + attributes: + label: Data Transfer Status + description: What's the status of the data transfer? + placeholder: | + Please share the output of `lotus-miner data-transfers list -v` commands for the deal(s) in question. + validations: + required: true +- type: textarea + id: logging + attributes: + label: Logging Information + description: Please link to the whole of the miner logs on your side of the transaction. You can upload the logs to a [gist](https://gist.github.com). + validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps (optional) + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml b/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml new file mode 100644 index 000000000..ede3593e5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/m1_bug_report_non_deal.yml @@ -0,0 +1,81 @@ +name: "M1 Bug Report For Non Deal Making Issue" +description: "File a bug report around non deal making issue for the M1 releases" +labels: [need/triage, kind/bug, M1-release] +body: +- type: checkboxes + id: checklist + attributes: + label: Checklist + description: Please check off the following boxes before continuing to file a bug report! + options: + - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions). + required: true + - label: I **am** reporting a bug w.r.t one of the [M1 tags](https://github.com/filecoin-project/lotus/discussions/6852#discussioncomment-1043951). If not, choose another issue option [here](https://github.com/filecoin-project/lotus/issues/new/choose). + required: true + - label: I am **not** reporting a bug around deal making. If yes, create a [M1 Bug Report For Deal Making](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fbug%2CM1-release&template=m1_bug_report_deal.yml). + required: true + - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion. 
+ required: true +- type: dropdown + id: component-and-area + validations: + required: true + attributes: + label: Lotus component + description: Please select the lotus component you are filing a bug for + options: + - lotus daemon - chain sync **with** splitstore enabled + - lotus daemon - chain sync **without** splitstore enabled + - lotus miner - mining and block production + - lotus miner/worker - sealing + - lotus miner - proving(WindowPoSt) + - lotus client + - lotus JSON-RPC API + - lotus message management (mpool) + - Other +- type: textarea + id: version + attributes: + label: Lotus Tag and Version + description: Enter the lotus tag, output of `lotus version` and `lotus-miner version`. + validations: + reuiqred: true +- type: textarea + id: Description + attributes: + label: Describe the Bug + description: | + This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information: + * What you were doding when you experienced the bug? + * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas). + * What is the expected behaviour? + * For sealing issues, include the output of `lotus-miner sectors status --log ` for the failed sector(s). + * For proving issues, include the output of `lotus-miner proving` info. + render: bash + validations: + required: true +- type: textarea + id: extraInfo + attributes: + label: Logging Information + description: | + Please provide debug logs of the problem, remember you can get set log level control for: + * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#log-level-control). + * lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level + If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before furthur diagnosing the problem. + render: bash + validations: + required: true +- type: textarea + id: RepoSteps + attributes: + label: Repo Steps + description: "Steps to reproduce the behavior" + value: | + 1. Run '...' + 2. Do '...' + 3. See error '...' + ... + render: bash + validations: + required: false \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/mining-issues.md b/.github/ISSUE_TEMPLATE/mining-issues.md deleted file mode 100644 index 434e160d4..000000000 --- a/.github/ISSUE_TEMPLATE/mining-issues.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -name: Mining Issues -about: Create a report for help with mining failures. -title: "[Mining Issue]" -labels: hint/needs-triaging, area/mining -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "mining/WinningPoSt failed" issues. -If the information requested is missing, you may be asked you to provide it. - -**Describe the problem** -A brief description of the problem you encountered while mining new blocks. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. 
- -**Lotus daemon and miner logs** - -Please go through the logs of your daemon and miner, and include screenshots of any error/warning-like messages you find, highlighting the one has "winning post" in it. - -Alternatively please upload full log files and share a link here - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/proving-issues.md b/.github/ISSUE_TEMPLATE/proving-issues.md deleted file mode 100644 index 6187d546e..000000000 --- a/.github/ISSUE_TEMPLATE/proving-issues.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Proving Issues -about: Create a report for help with proving failures. -title: "[Proving Issue]" -labels: area/proving, hint/needs-triaging -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "proving/window PoSt failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Describe the problem** -A brief description of the problem you encountered while proving the storage. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. - -**Proving status** - -The output of `lotus-miner proving` info. - -**Lotus miner logs** - -Please go through the logs of your miner, and include screenshots of any error-like messages you find, highlighting the one has "window post" in it. - -Alternatively please upload full log files and share a link here - -**Lotus miner diagnostic info** - -Please collect the following diagnostic information, and share a link here - -* lotus-miner diagnostic info `lotus-miner info all > allinfo.txt` - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/.github/ISSUE_TEMPLATE/sealing-issues.md b/.github/ISSUE_TEMPLATE/sealing-issues.md deleted file mode 100644 index 7511849d3..000000000 --- a/.github/ISSUE_TEMPLATE/sealing-issues.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: Sealing Issues -about: Create a report for help with sealing (commit) failures. -title: "[Sealing Issue]" -labels: hint/needs-triaging, area/sealing -assignees: '' - ---- - -> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy). - -Please provide all the information requested here to help us troubleshoot "commit failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. - -**Describe the problem** -A brief description of the problem you encountered while sealing a sector. - -**Version** - -The output of `lotus --version`. - -**Setup** - -You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc. - -**Commands** - -Commands you ran. - -**Sectors status** - -The output of `lotus-miner sectors status --log ` for the failed sector(s). 
- -**Lotus miner logs** - -Please go through the logs of your miner, and include screenshots of any error-like messages you find. - -Alternatively please upload full log files and share a link here - -**Lotus miner diagnostic info** - -Please collect the following diagnostic information, and share a link here - -* lotus-miner diagnostic info `lotus-miner info all > allinfo` - -** Code modifications ** - -If you have modified parts of lotus, please describe which areas were modified, -and the scope of those modifications diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 0b43ef806..72c609305 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -36,7 +36,7 @@ WORKDIR /opt/filecoin ARG RUSTFLAGS="" ARG GOFLAGS="" -RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats +RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway FROM ubuntu:20.04 AS base @@ -56,19 +56,173 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ RUN useradd -r -u 532 -U fc +### FROM base AS lotus MAINTAINER Lotus Development Team -COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ +COPY scripts/docker-lotus-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car +ENV DOCKER_LOTUS_IMPORT_WALLET "" -RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters +RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters + +VOLUME /var/lib/lotus +VOLUME /var/tmp/filecoin-proof-parameters USER fc -ENTRYPOINT ["/usr/local/bin/lotus"] +EXPOSE 1234 + +ENTRYPOINT ["/docker-lotus-entrypoint.sh"] CMD ["-help"] + +### +FROM base AS lotus-wallet +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ + +ENV WALLET_PATH /var/lib/lotus-wallet +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 + +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/lib/lotus-wallet + +VOLUME /var/lib/lotus-wallet + +USER fc + +EXPOSE 1777 + +ENTRYPOINT ["/usr/local/bin/lotus-wallet"] + +CMD ["-help"] + +### +FROM base AS lotus-gateway +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ + +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http + +USER fc + +EXPOSE 1234 + +ENTRYPOINT ["/usr/local/bin/lotus-gateway"] + +CMD ["-help"] + + +### +FROM base AS lotus-miner +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY scripts/docker-lotus-miner-entrypoint.sh / + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV DOCKER_LOTUS_MINER_INIT true + +RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters + +VOLUME /var/lib/lotus-miner +VOLUME /var/tmp/filecoin-proof-parameters + +USER 
fc + +EXPOSE 2345 + +ENTRYPOINT ["/docker-lotus-miner-entrypoint.sh"] + +CMD ["-help"] + + +### +FROM base AS lotus-worker +MAINTAINER Lotus Development Team + +COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http +ENV LOTUS_WORKER_PATH /var/lib/lotus-worker +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 + +RUN mkdir /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-worker + +VOLUME /var/lib/lotus-worker + +USER fc + +EXPOSE 3456 + +ENTRYPOINT ["/usr/local/bin/lotus-worker"] + +CMD ["-help"] + + +### +from base as lotus-all-in-one + +ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters +ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http +ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 +ENV LOTUS_JAEGER_AGENT_PORT 6831 +ENV LOTUS_MINER_PATH /var/lib/lotus-miner +ENV LOTUS_PATH /var/lib/lotus +ENV LOTUS_WORKER_PATH /var/lib/lotus-worker +ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http +ENV WALLET_PATH /var/lib/lotus-wallet +ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car +ENV DOCKER_LOTUS_MINER_INIT true + +COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ + +RUN mkdir /var/tmp/filecoin-proof-parameters +RUN mkdir /var/lib/lotus +RUN mkdir /var/lib/lotus-miner +RUN mkdir /var/lib/lotus-worker +RUN mkdir /var/lib/lotus-wallet +RUN chown fc: /var/tmp/filecoin-proof-parameters +RUN chown fc: /var/lib/lotus +RUN chown fc: /var/lib/lotus-miner +RUN chown fc: /var/lib/lotus-worker +RUN chown fc: /var/lib/lotus-wallet + + +VOLUME /var/tmp/filecoin-proof-parameters +VOLUME /var/lib/lotus +VOLUME /var/lib/lotus-miner +VOLUME /var/lib/lotus-worker +VOLUME /var/lib/lotus-wallet + +EXPOSE 1234 +EXPOSE 2345 +EXPOSE 3456 +EXPOSE 1777 diff --git a/Makefile b/Makefile index a5ce8a99f..2e9fa7459 100644 --- a/Makefile +++ b/Makefile @@ -336,6 +336,9 @@ api-gen: goimports -w api .PHONY: api-gen +cfgdoc-gen: + go run ./node/config/cfgdocgen > ./node/config/doc_gen.go + appimage: lotus rm -rf appimage-builder-cache || true rm AppDir/io.filecoin.lotus.desktop || true @@ -373,7 +376,7 @@ docsgen-openrpc-worker: docsgen-openrpc-bin .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin -gen: actors-gen type-gen method-gen docsgen api-gen circleci +gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci @echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli" .PHONY: gen diff --git a/blockstore/splitstore/README.md b/blockstore/splitstore/README.md index 1c6569a34..5b0df61d9 100644 --- a/blockstore/splitstore/README.md +++ b/blockstore/splitstore/README.md @@ -27,9 +27,38 @@ If you intend to use the discard coldstore, your also need to add the following: ColdStoreType = "discard" ``` In general you _should not_ have to use the discard store, unless you -are running a network booster or have very constrained hardware with -not enough disk space to maintain a coldstore, even with garbage -collection. 
+are running a network assistive node (like a bootstrapper or booster) +or have very constrained hardware with not enough disk space to +maintain a coldstore, even with garbage collection. It is also appropriate +for small nodes that are simply watching the chain. + +*Warning:* Using the discard store for a general purpose node is discouraged, unless +you really know what you are doing. Use it at your own risk. + +## Configuration Options + +These are options in the `[Chainstore.Splitstore]` section of the configuration: + +- `HotStoreType` -- specifies the type of hotstore to use. + The only currently supported option is `"badger"`. +- `ColdStoreType` -- specifies the type of coldstore to use. + The default value is `"universal"`, which will use the initial monolith blockstore + as the coldstore. + The other possible value is `"discard"`, as outlined above, which is specialized for + running without a coldstore. Note that the discard store wraps the initial monolith + blockstore and discards writes; this is necessary to support syncing from a snapshot. +- `MarkSetType` -- specifies the type of markset to use during compaction. + The markset is the data structure used by compaction/gc to track live objects. + The default value is `"map"`, which will use an in-memory map; if you are limited + in memory (or indeed see compaction run out of memory), you can also specify + `"badger"` which will use an disk backed markset, using badger. This will use + much less memory, but will also make compaction slower. +- `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4 + finalities maintained by default, to maintain messages and message receipts in the + hotstore. This is useful for assistive nodes that want to support syncing for other + nodes beyond 4 finalities, while running with the discard coldstore option. + It is also useful for miners who accept deals and need to lookback messages beyond + the 4 finalities, which would otherwise hit the coldstore. ## Operation @@ -67,6 +96,6 @@ Compaction works transactionally with the following algorithm: - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live - We then end the transaction and compact/gc the hotstore. 
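
For reference, a minimal sketch of how the options described above might be combined in the `[Chainstore.Splitstore]` section of the node's config.toml, assuming the standard config layout; the `HotStoreMessageRetention` value is purely illustrative, and any option can be omitted to keep its default:

```
[Chainstore.Splitstore]
  # "badger" is currently the only supported hotstore type
  HotStoreType = "badger"
  # "universal" (default) reuses the monolith blockstore as the coldstore; "discard" drops cold writes
  ColdStoreType = "universal"
  # use "badger" instead of the default "map" if compaction runs out of memory
  MarkSetType = "badger"
  # extra finalities of messages/receipts kept in the hotstore beyond the default 4 (illustrative value)
  HotStoreMessageRetention = 2
```
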
-## Coldstore Garbage Collection +## Garbage Collection TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577) diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go index a644e7279..458ea8beb 100644 --- a/blockstore/splitstore/markset.go +++ b/blockstore/splitstore/markset.go @@ -32,6 +32,8 @@ func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) { return NewBloomMarkSetEnv() case "map": return NewMapMarkSetEnv() + case "badger": + return NewBadgerMarkSetEnv(path) default: return nil, xerrors.Errorf("unknown mark set type %s", mtype) } diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go new file mode 100644 index 000000000..ef67db213 --- /dev/null +++ b/blockstore/splitstore/markset_badger.go @@ -0,0 +1,230 @@ +package splitstore + +import ( + "os" + "path/filepath" + "sync" + + "golang.org/x/xerrors" + + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/options" + "go.uber.org/zap" + + cid "github.com/ipfs/go-cid" +) + +type BadgerMarkSetEnv struct { + path string +} + +var _ MarkSetEnv = (*BadgerMarkSetEnv)(nil) + +type BadgerMarkSet struct { + mx sync.RWMutex + cond sync.Cond + pend map[string]struct{} + writing map[int]map[string]struct{} + writers int + seqno int + + db *badger.DB + path string +} + +var _ MarkSet = (*BadgerMarkSet)(nil) + +var badgerMarkSetBatchSize = 16384 + +func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) { + msPath := filepath.Join(path, "markset.badger") + err := os.MkdirAll(msPath, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } + + return &BadgerMarkSetEnv{path: msPath}, nil +} + +func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { + path := filepath.Join(e.path, name) + + // clean up first + err := os.RemoveAll(path) + if err != nil { + return nil, xerrors.Errorf("error clearing markset directory: %w", err) + } + + err = os.MkdirAll(path, 0755) //nolint:gosec + if err != nil { + return nil, xerrors.Errorf("error creating markset directory: %w", err) + } + + opts := badger.DefaultOptions(path) + opts.SyncWrites = false + opts.CompactL0OnClose = false + opts.Compression = options.None + // Note: We use FileIO for loading modes to avoid memory thrashing and interference + // between the system blockstore and the markset. + // It was observed that using the default memory mapped option resulted in + // significant interference and unacceptably high block validation times once the markset + // exceeded 1GB in size. 
+ opts.TableLoadingMode = options.FileIO + opts.ValueLogLoadingMode = options.FileIO + opts.Logger = &badgerLogger{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + + db, err := badger.Open(opts) + if err != nil { + return nil, xerrors.Errorf("error creating badger markset: %w", err) + } + + ms := &BadgerMarkSet{ + pend: make(map[string]struct{}), + writing: make(map[int]map[string]struct{}), + db: db, + path: path, + } + ms.cond.L = &ms.mx + + return ms, nil +} + +func (e *BadgerMarkSetEnv) Close() error { + return os.RemoveAll(e.path) +} + +func (s *BadgerMarkSet) Mark(c cid.Cid) error { + s.mx.Lock() + + if s.pend == nil { + s.mx.Unlock() + return errMarkSetClosed + } + + s.pend[string(c.Hash())] = struct{}{} + + if len(s.pend) < badgerMarkSetBatchSize { + s.mx.Unlock() + return nil + } + + pend := s.pend + seqno := s.seqno + s.seqno++ + s.writing[seqno] = pend + s.pend = make(map[string]struct{}) + s.writers++ + s.mx.Unlock() + + defer func() { + s.mx.Lock() + defer s.mx.Unlock() + + delete(s.writing, seqno) + s.writers-- + if s.writers == 0 { + s.cond.Broadcast() + } + }() + + empty := []byte{} // not nil + + batch := s.db.NewWriteBatch() + defer batch.Cancel() + + for k := range pend { + if err := batch.Set([]byte(k), empty); err != nil { + return err + } + } + + err := batch.Flush() + if err != nil { + return xerrors.Errorf("error flushing batch to badger markset: %w", err) + } + + return nil +} + +func (s *BadgerMarkSet) Has(c cid.Cid) (bool, error) { + s.mx.RLock() + defer s.mx.RUnlock() + + if s.pend == nil { + return false, errMarkSetClosed + } + + key := c.Hash() + pendKey := string(key) + _, ok := s.pend[pendKey] + if ok { + return true, nil + } + + for _, wr := range s.writing { + _, ok := wr[pendKey] + if ok { + return true, nil + } + } + + err := s.db.View(func(txn *badger.Txn) error { + _, err := txn.Get(key) + return err + }) + + switch err { + case nil: + return true, nil + + case badger.ErrKeyNotFound: + return false, nil + + default: + return false, xerrors.Errorf("error checking badger markset: %w", err) + } +} + +func (s *BadgerMarkSet) Close() error { + s.mx.Lock() + defer s.mx.Unlock() + + if s.pend == nil { + return nil + } + + for s.writers > 0 { + s.cond.Wait() + } + + s.pend = nil + db := s.db + s.db = nil + + err := db.Close() + if err != nil { + return xerrors.Errorf("error closing badger markset: %w", err) + } + + err = os.RemoveAll(s.path) + if err != nil { + return xerrors.Errorf("error deleting badger markset: %w", err) + } + + return nil +} + +func (s *BadgerMarkSet) SetConcurrent() {} + +// badger logging through go-log +type badgerLogger struct { + *zap.SugaredLogger + skip2 *zap.SugaredLogger +} + +func (b *badgerLogger) Warningf(format string, args ...interface{}) {} +func (b *badgerLogger) Infof(format string, args ...interface{}) {} +func (b *badgerLogger) Debugf(format string, args ...interface{}) {} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index d5c01e220..38519949a 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -16,6 +16,15 @@ func TestBloomMarkSet(t *testing.T) { testMarkSet(t, "bloom") } +func TestBadgerMarkSet(t *testing.T) { + bs := badgerMarkSetBatchSize + badgerMarkSetBatchSize = 1 + t.Cleanup(func() { + badgerMarkSetBatchSize = bs + }) + testMarkSet(t, "badger") +} + func testMarkSet(t *testing.T, lsType string) { t.Helper() diff --git 
a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 821ebb2b6..b401d657e 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -62,8 +62,11 @@ func init() { type Config struct { // MarkSetType is the type of mark set to use. // - // Only current sane value is "map", but we may add an option for a disk-backed - // markset for memory-constrained situations. + // The default value is "map", which uses an in-memory map-backed markset. + // If you are constrained in memory (i.e. compaction runs out of memory), you + // can use "badger", which will use a disk-backed markset using badger. + // Note that compaction will take quite a bit longer when using the "badger" option, + // but that shouldn't really matter (as long as it is under 7.5hrs). MarkSetType string // DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore. diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 86f035e6f..b95459ea5 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -184,16 +184,6 @@ func (s *SplitStore) trackTxnRef(c cid.Cid) { return } - if s.txnProtect != nil { - mark, err := s.txnProtect.Has(c) - if err != nil { - log.Warnf("error checking markset: %s", err) - // track it anyways - } else if mark { - return - } - } - s.txnRefsMx.Lock() s.txnRefs[c] = struct{}{} s.txnRefsMx.Unlock() @@ -209,27 +199,11 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) { s.txnRefsMx.Lock() defer s.txnRefsMx.Unlock() - quiet := false for _, c := range cids { if isUnitaryObject(c) { continue } - if s.txnProtect != nil { - mark, err := s.txnProtect.Has(c) - if err != nil { - if !quiet { - quiet = true - log.Warnf("error checking markset: %s", err) - } - // track it anyways - } - - if mark { - continue - } - } - s.txnRefs[c] = struct{}{} } @@ -631,7 +605,7 @@ func (s *SplitStore) endTxnProtect() { s.txnMissing = nil } -func (s *SplitStore) walkChain(ts *types.TipSet, inclState abi.ChainEpoch, inclMsgs abi.ChainEpoch, +func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, f func(cid.Cid) error) error { visited := cid.NewSet() walked := cid.NewSet() @@ -639,6 +613,8 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState abi.ChainEpoch, inclM walkCnt := 0 scanCnt := 0 + stopWalk := func(_ cid.Cid) error { return errStopWalk } + walkBlock := func(c cid.Cid) error { if !visited.Visit(c) { return nil @@ -662,27 +638,28 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState abi.ChainEpoch, inclM // message are retained if within the inclMsgs boundary if hdr.Height >= inclMsgs && hdr.Height > 0 { if inclMsgs < inclState { - // we need to use walkObjectIncomplete here, as messages may be missing early on if we + // we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we // synced from snapshot and have a long HotStoreMessageRetentionPolicy. 
- stopWalk := func(_ cid.Cid) error { return errStopWalk } if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } + + if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, walked, f, stopWalk); err != nil { + return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) + } } else { if err := s.walkObject(hdr.Messages, walked, f); err != nil { return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err) } - } - } - // state and message receipts is only retained if within the inclState boundary - if hdr.Height >= inclState || hdr.Height == 0 { - if hdr.Height > 0 { if err := s.walkObject(hdr.ParentMessageReceipts, walked, f); err != nil { return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err) } } + } + // state is only retained if within the inclState boundary, with the exception of genesis + if hdr.Height >= inclState || hdr.Height == 0 { if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) } diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index 26e5c3cc0..b945eb90b 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -210,6 +210,15 @@ func TestSplitStoreCompaction(t *testing.T) { testSplitStore(t, &Config{MarkSetType: "map"}) } +func TestSplitStoreCompactionWithBadger(t *testing.T) { + bs := badgerMarkSetBatchSize + badgerMarkSetBatchSize = 1 + t.Cleanup(func() { + badgerMarkSetBatchSize = bs + }) + testSplitStore(t, &Config{MarkSetType: "badger"}) +} + type mockChain struct { t testing.TB diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index 55fa94c6f..7c5769e22 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -48,7 +48,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { count := int64(0) xcount := int64(0) missing := int64(0) - err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages in warmup + err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages/receipts in warmup func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go index d0d99ece4..8879d01b5 100644 --- a/build/params_nerpanet.go +++ b/build/params_nerpanet.go @@ -43,7 +43,7 @@ const UpgradeOrangeHeight = 300 const UpgradeTrustHeight = 600 const UpgradeNorwegianHeight = 201000 const UpgradeTurboHeight = 203000 -const UpgradeHyperdriveHeight = 999999999 +const UpgradeHyperdriveHeight = 379178 func init() { // Minimum block production power is set to 4 TiB diff --git a/cmd/lotus-storage-miner/config.go b/cmd/lotus-storage-miner/config.go index e5e4fc4c4..652426583 100644 --- a/cmd/lotus-storage-miner/config.go +++ b/cmd/lotus-storage-miner/config.go @@ -4,19 +4,91 @@ import ( "fmt" "github.com/urfave/cli/v2" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" ) var configCmd = &cli.Command{ Name: "config", - Usage: "Output default configuration", + Usage: "Manage node config", + Subcommands: []*cli.Command{ + configDefaultCmd, + configUpdateCmd, + }, +} + +var configDefaultCmd = &cli.Command{ + Name: "default", + Usage: 
"Print default node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, Action: func(cctx *cli.Context) error { - comm, err := config.ConfigComment(config.DefaultStorageMiner()) + c := config.DefaultStorageMiner() + + cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment")) if err != nil { return err } - fmt.Println(string(comm)) + + fmt.Println(string(cb)) + + return nil + }, +} + +var configUpdateCmd = &cli.Command{ + Name: "updated", + Usage: "Print updated node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String(FlagMinerRepo)) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + + if !ok { + return xerrors.Errorf("repo not initialized") + } + + lr, err := r.LockRO(repo.StorageMiner) + if err != nil { + return xerrors.Errorf("locking repo: %w", err) + } + + cfgNode, err := lr.Config() + if err != nil { + _ = lr.Close() + return xerrors.Errorf("getting node config: %w", err) + } + + if err := lr.Close(); err != nil { + return err + } + + cfgDef := config.DefaultStorageMiner() + + updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Print(string(updated)) return nil }, } diff --git a/cmd/lotus/config.go b/cmd/lotus/config.go new file mode 100644 index 000000000..fcb7e2b08 --- /dev/null +++ b/cmd/lotus/config.go @@ -0,0 +1,94 @@ +package main + +import ( + "fmt" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +var configCmd = &cli.Command{ + Name: "config", + Usage: "Manage node config", + Subcommands: []*cli.Command{ + configDefaultCmd, + configUpdateCmd, + }, +} + +var configDefaultCmd = &cli.Command{ + Name: "default", + Usage: "Print default node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + c := config.DefaultFullNode() + + cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Println(string(cb)) + + return nil + }, +} + +var configUpdateCmd = &cli.Command{ + Name: "updated", + Usage: "Print updated node config", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "no-comment", + Usage: "don't comment default values", + }, + }, + Action: func(cctx *cli.Context) error { + r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + ok, err := r.Exists() + if err != nil { + return err + } + + if !ok { + return xerrors.Errorf("repo not initialized") + } + + lr, err := r.LockRO(repo.FullNode) + if err != nil { + return xerrors.Errorf("locking repo: %w", err) + } + + cfgNode, err := lr.Config() + if err != nil { + _ = lr.Close() + return xerrors.Errorf("getting node config: %w", err) + } + + if err := lr.Close(); err != nil { + return err + } + + cfgDef := config.DefaultFullNode() + + updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment")) + if err != nil { + return err + } + + fmt.Print(string(updated)) + return nil + }, +} diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 63d01f891..d803cce1e 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -29,6 +29,7 @@ func main() { local := []*cli.Command{ DaemonCmd, backupCmd, + 
configCmd, } if AdvanceBlockCmd != nil { local = append(local, AdvanceBlockCmd) diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 000000000..b962d5cc2 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,145 @@ +# By default, this docker-compose file will start a lotus fullnode +# +# Some directives have been left commented out so they serve as an +# example for more advanced use. +# +# To provide a custom configuration file, or automatically import +# a wallet, uncomment the "configs" or "secrets" sections. +# +# start on a single node: +# +# docker-compose up +# +# start on docker swarm: +# +# docker swarm init (if you haven't already) +# docker stack deploy -c docker-compose.yaml mylotuscluster +# +# for more information, please visit docs.filecoin.io + +version: "3.8" + +volumes: + parameters: + lotus-repo: + lotus-miner-repo: + lotus-worker-repo: + +configs: + lotus-config-toml: + file: /path/to/lotus/config.toml + lotus-miner-config-toml: + file: /path/to/lotus-miner/config.toml + +secrets: + lotus-wallet: + file: /path/to/exported/lotus/wallet + +services: + lotus: + build: + context: . + target: lotus + dockerfile: Dockerfile.lotus + image: filecoin/lotus + volumes: + - parameters:/var/tmp/filecoin-proof-parameters + - lotus-repo:/var/lib/lotus + ports: + - 1234:1234 + environment: + - LOTUS_JAEGER_AGENT_HOST=jaeger + - LOTUS_JAEGER_AGENT_PORT=6831 + # - DOCKER_LOTUS_IMPORT_WALLET=/tmp/wallet + deploy: + restart_policy: + condition: on-failure + delay: 30s + # configs: + # - source: lotus-config-toml + # target: /var/lib/lotus/config.toml + # secrets: + # - source: lotus-wallet + # target: /tmp/wallet + command: + - daemon + lotus-gateway: + build: + context: . + target: lotus-gateway + dockerfile: Dockerfile.lotus + image: filecoin/lotus-gateway + depends_on: + - lotus + ports: + - 1235:1234 + environment: + - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http + - LOTUS_JAEGER_AGENT_HOST=jaeger + - LOTUS_JAEGER_AGENT_PORT=6831 + deploy: + restart_policy: + condition: on-failure + delay: 30s + command: + - run + # + # Uncomment to run miner software + # + # lotus-miner: + # build: + # context: . + # target: lotus-miner + # dockerfile: Dockerfile.lotus + # image: filecoin/lotus-miner + # volumes: + # - parameters:/var/tmp/filecoin-proof-parameters + # - lotus-miner-repo:/var/lib/lotus-miner + # depends_on: + # - lotus + # ports: + # - 2345:2345 + # environment: + # - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http + # - LOTUS_JAEGER_AGENT_HOST=jaeger + # - LOTUS_JAEGER_AGENT_PORT=6831 + # deploy: + # restart_policy: + # condition: on-failure + # delay: 30s + # configs: + # - source: lotus-miner-config-toml + # - target: /var/lib/lotus-miner/config.toml + # command: + # - run + # lotus-worker: + # build: + # context: . 
+ # target: lotus-worker + # dockerfile: Dockerfile.lotus + # image: filecoin/lotus-worker + # volumes: + # - parameters:/var/tmp/filecoin-proof-parameters + # - lotus-worker-repo:/var/lib/lotus-worker + # depends_on: + # - lotus-worker + # environment: + # - MINER_API_INFO=/dns/lotus-miner/tcp/1234/http + # - LOTUS_JAEGER_AGENT_HOST=jaeger + # - LOTUS_JAEGER_AGENT_PORT=6831 + # deploy: + # restart_policy: + # condition: on-failure + # delay: 30s + # replicas: 2 + # command: + # - run + jaeger: + image: jaegertracing/all-in-one + ports: + - "6831:6831/udp" + - "16686:16686" + deploy: + restart_policy: + condition: on-failure + delay: 30s diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 37384c349..2ba693bff 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -13,7 +13,7 @@ COMMANDS: init Initialize a lotus miner repo run Start a lotus miner process stop Stop a running lotus miner - config Output default configuration + config Manage node config backup Create node metadata backup version Print version help, h Shows a list of commands or help for one command @@ -145,13 +145,47 @@ OPTIONS: ## lotus-miner config ``` NAME: - lotus-miner config - Output default configuration + lotus-miner config - Manage node config USAGE: - lotus-miner config [command options] [arguments...] + lotus-miner config command [command options] [arguments...] + +COMMANDS: + default Print default node config + updated Print updated node config + help, h Shows a list of commands or help for one command OPTIONS: - --help, -h show help (default: false) + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus-miner config default +``` +NAME: + lotus-miner config default - Print default node config + +USAGE: + lotus-miner config default [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + +### lotus-miner config updated +``` +NAME: + lotus-miner config updated - Print updated node config + +USAGE: + lotus-miner config updated [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) ``` diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index 6971ed6e7..cae648a0d 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -12,6 +12,7 @@ VERSION: COMMANDS: daemon Start a lotus daemon process backup Create node metadata backup + config Manage node config version Print version help, h Shows a list of commands or help for one command BASIC: @@ -108,6 +109,53 @@ OPTIONS: ``` +## lotus config +``` +NAME: + lotus config - Manage node config + +USAGE: + lotus config command [command options] [arguments...] + +COMMANDS: + default Print default node config + updated Print updated node config + help, h Shows a list of commands or help for one command + +OPTIONS: + --help, -h show help (default: false) + --version, -v print the version (default: false) + +``` + +### lotus config default +``` +NAME: + lotus config default - Print default node config + +USAGE: + lotus config default [command options] [arguments...] 
+ +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + +### lotus config updated +``` +NAME: + lotus config updated - Print updated node config + +USAGE: + lotus config updated [command options] [arguments...] + +OPTIONS: + --no-comment don't comment default values (default: false) + --help, -h show help (default: false) + +``` + ## lotus version ``` NAME: diff --git a/go.mod b/go.mod index 9520c8d98..1725754b4 100644 --- a/go.mod +++ b/go.mod @@ -33,9 +33,9 @@ require ( github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/filecoin-project/go-data-transfer v1.6.0 + github.com/filecoin-project/go-data-transfer v1.7.0 github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a - github.com/filecoin-project/go-fil-markets v1.5.0 + github.com/filecoin-project/go-fil-markets v1.6.0-rc1 github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec github.com/filecoin-project/go-multistore v0.0.3 github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 @@ -77,7 +77,7 @@ require ( github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 github.com/ipfs/go-filestore v1.0.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.6.1 + github.com/ipfs/go-graphsync v0.6.5 github.com/ipfs/go-ipfs-blockstore v1.0.3 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-ds-help v1.0.0 diff --git a/go.sum b/go.sum index 86ac15b2c..ee34abe5c 100644 --- a/go.sum +++ b/go.sum @@ -275,16 +275,16 @@ github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/ github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.6.0 h1:DHIzEc23ydRCCBwtFet3MfgO8gMpZEnw60Y+s71oX6o= -github.com/filecoin-project/go-data-transfer v1.6.0/go.mod h1:E3WW4mCEYwU2y65swPEajSZoFWFmfXt7uwGduoACZQc= +github.com/filecoin-project/go-data-transfer v1.7.0 h1:mFRn+UuTdPROmhplLSekzd4rAs9ug8ubtSY4nw9wYkU= +github.com/filecoin-project/go-data-transfer v1.7.0/go.mod h1:GLRr5BmLEqsLwXfiRDG7uJvph22KGL2M4iOuF8EINaU= github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.5.0 h1:3KEs01L8XFCEgujZ6ggFjr1XWjpjTQcmSSeo3I99I0k= -github.com/filecoin-project/go-fil-markets v1.5.0/go.mod h1:7be6zzFwaN8kxVeYZf/UUj/JilHC0ogPvWqE1TW8Ptk= 
+github.com/filecoin-project/go-fil-markets v1.6.0-rc1 h1:kQtND2NXz/cfGkjq+f5MCtz2oZAQabQvQ/zu4fppIps= +github.com/filecoin-project/go-fil-markets v1.6.0-rc1/go.mod h1:S/C9PcSLFp75NpaF5aUqutnhXVJk6hM2dhWPYNq2jCQ= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -627,8 +627,9 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.6.1 h1:i9wN7YkBXWwIsUjVQeuaDxFB59yWZrG1xL564Nz7aGE= -github.com/ipfs/go-graphsync v0.6.1/go.mod h1:e2ZxnClqBBYAtd901g9vXMJzS47labjAtOzsWtOzKNk= +github.com/ipfs/go-graphsync v0.6.4/go.mod h1:5WyaeigpNdpiYQuW2vwpuecOoEfB4h747ZGEOKmAGTg= +github.com/ipfs/go-graphsync v0.6.5 h1:YAJl6Yit23PQcaawzb1rPK9PSnbbq2jjMRPpRpJ0Y5U= +github.com/ipfs/go-graphsync v0.6.5/go.mod h1:GdHT8JeuIZ0R4lSjFR16Oe4zPi5dXwKi9zR9ADVlcdk= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index d7932b896..241c9071d 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -7,13 +7,13 @@ import ( "testing" "time" + "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/stretchr/testify/require" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules" @@ -56,14 +56,14 @@ func TestDealWithMarketAndMinerNode(t *testing.T) { }) } - // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# - cycles := []int{1} + // this test is expensive because we don't use mock proofs; do a single cycle. 
+ cycles := []int{4} for _, n := range cycles { n := n ns := fmt.Sprintf("%d", n) t.Run(ns+"-fastretrieval-CAR", func(t *testing.T) { runTest(t, n, true, true) }) t.Run(ns+"-fastretrieval-NoCAR", func(t *testing.T) { runTest(t, n, true, false) }) - t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, true, false) }) + t.Run(ns+"-stdretrieval-CAR", func(t *testing.T) { runTest(t, n, false, true) }) t.Run(ns+"-stdretrieval-NoCAR", func(t *testing.T) { runTest(t, n, false, false) }) } } @@ -73,6 +73,12 @@ func TestDealCyclesConcurrent(t *testing.T) { t.Skip("skipping test in short mode") } + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + kit.QuietMiningLogs() blockTime := 10 * time.Millisecond @@ -95,8 +101,8 @@ func TestDealCyclesConcurrent(t *testing.T) { }) } - // TODO: add 2, 4, 8, more when this graphsync issue is fixed: https://github.com/ipfs/go-graphsync/issues/175# - cycles := []int{1} + // this test is cheap because we use mock proofs, do various cycles + cycles := []int{2, 4, 8, 16} for _, n := range cycles { n := n ns := fmt.Sprintf("%d", n) @@ -107,13 +113,19 @@ func TestDealCyclesConcurrent(t *testing.T) { } } -func TestSimultenousTransferLimit(t *testing.T) { +func TestSimultanenousTransferLimit(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode") } kit.QuietMiningLogs() + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + blockTime := 10 * time.Millisecond // For these tests where the block time is artificially short, just use @@ -121,9 +133,14 @@ func TestSimultenousTransferLimit(t *testing.T) { // so that the deal starts sealing in time startEpoch := abi.ChainEpoch(2 << 12) + const ( + graphsyncThrottle = 2 + concurrency = 20 + ) runTest := func(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(2))), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle))), + node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle)), )) ens.InterconnectAll().BeginMining(blockTime) dh := kit.NewDealHarness(t, client, miner, miner) @@ -145,7 +162,7 @@ func TestSimultenousTransferLimit(t *testing.T) { select { case u := <-du: t.Logf("%d - %s", u.TransferID, datatransfer.Statuses[u.Status]) - if u.Status == datatransfer.Ongoing { + if u.Status == datatransfer.Ongoing && u.Transferred > 0 { ongoing[u.TransferID] = struct{}{} } else { delete(ongoing, u.TransferID) @@ -160,16 +177,20 @@ func TestSimultenousTransferLimit(t *testing.T) { } }() + t.Logf("running concurrent deals: %d", concurrency) + dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{ - N: 1, // TODO: set to 20 after https://github.com/ipfs/go-graphsync/issues/175 is fixed + N: concurrency, FastRetrieval: true, StartEpoch: startEpoch, }) + t.Logf("all deals finished") + cancel() wg.Wait() - require.LessOrEqual(t, maxOngoing, 2) + require.LessOrEqual(t, maxOngoing, graphsyncThrottle) } runTest(t) diff --git a/itests/kit/deals.go b/itests/kit/deals.go index 4cee13925..311db4b25 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -292,6 +292,7 @@ func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { 
for i := 0; i < opts.N; i++ { i := i errgrp.Go(func() (err error) { + defer dh.t.Logf("finished concurrent deal %d/%d", i, opts.N) defer func() { // This is necessary because golang can't deal with test // failures being reported from children goroutines ¯\_(ツ)_/¯ @@ -299,11 +300,17 @@ func (dh *DealHarness) RunConcurrentDeals(opts RunConcurrentDealsOpts) { err = fmt.Errorf("deal failed: %s", r) } }() + + dh.t.Logf("making storage deal %d/%d", i, opts.N) + deal, res, inPath := dh.MakeOnlineDeal(context.Background(), MakeFullDealParams{ Rseed: 5 + i, FastRet: opts.FastRetrieval, StartEpoch: opts.StartEpoch, }) + + dh.t.Logf("retrieving deal %d/%d", i, opts.N) + outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, opts.CarExport) AssertFilesEqual(dh.t, inPath, outPath) return nil diff --git a/node/config/cfgdocgen/gen.go b/node/config/cfgdocgen/gen.go new file mode 100644 index 000000000..8d0efb65e --- /dev/null +++ b/node/config/cfgdocgen/gen.go @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "sort" + "strings" +) + +func run() error { + tfb, err := ioutil.ReadFile("./node/config/types.go") + if err != nil { + return err + } + + // could use the ast lib, but this is simpler + + type st int + const ( + stGlobal st = iota // looking for typedef + stType st = iota // in typedef + ) + + lines := strings.Split(string(tfb), "\n") + state := stGlobal + + type field struct { + Name string + Type string + Comment string + } + + var currentType string + var currentComment []string + + out := map[string][]field{} + + for l := range lines { + line := strings.TrimSpace(lines[l]) + + switch state { + case stGlobal: + if strings.HasPrefix(line, "type ") { + currentType = line + currentType = strings.TrimPrefix(currentType, "type") + currentType = strings.TrimSuffix(currentType, "{") + currentType = strings.TrimSpace(currentType) + currentType = strings.TrimSuffix(currentType, "struct") + currentType = strings.TrimSpace(currentType) + currentComment = nil + state = stType + continue + } + case stType: + if strings.HasPrefix(line, "// ") { + cline := strings.TrimSpace(strings.TrimPrefix(line, "//")) + currentComment = append(currentComment, cline) + continue + } + + comment := currentComment + currentComment = nil + + if strings.HasPrefix(line, "}") { + state = stGlobal + continue + } + + f := strings.Fields(line) + if len(f) < 2 { // empty or embedded struct + continue + } + + name := f[0] + typ := f[1] + + out[currentType] = append(out[currentType], field{ + Name: name, + Type: typ, + Comment: strings.Join(comment, "\n"), + }) + } + } + + var outt []string + for t := range out { + outt = append(outt, t) + } + sort.Strings(outt) + + fmt.Print(`// Code generated by github.com/filecoin-project/lotus/node/config/cfgdocgen. DO NOT EDIT. 
+ +package config + +type DocField struct { + Name string + Type string + Comment string +} + +var Doc = map[string][]DocField{ +`) + + for _, typeName := range outt { + typ := out[typeName] + + fmt.Printf("\t\"%s\": []DocField{\n", typeName) + + for _, f := range typ { + fmt.Println("\t\t{") + fmt.Printf("\t\t\tName: \"%s\",\n", f.Name) + fmt.Printf("\t\t\tType: \"%s\",\n\n", f.Type) + fmt.Printf("\t\t\tComment: `%s`,\n", f.Comment) + fmt.Println("\t\t},") + } + + fmt.Printf("\t},\n") + } + + fmt.Println(`}`) + + return nil +} + +func main() { + if err := run(); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} diff --git a/node/config/def.go b/node/config/def.go index 17f43ffc3..b438ea56b 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -24,269 +24,10 @@ const ( RetrievalPricingExternalMode = "external" ) -// Common is common config between full node and miner -type Common struct { - API API - Backup Backup - Libp2p Libp2p - Pubsub Pubsub -} - -// FullNode is a full node config -type FullNode struct { - Common - Client Client - Metrics Metrics - Wallet Wallet - Fees FeeConfig - Chainstore Chainstore -} - -// // Common - -type Backup struct { - DisableMetadataLog bool -} - -// StorageMiner is a miner config -type StorageMiner struct { - Common - - Subsystems MinerSubsystemConfig - Dealmaking DealmakingConfig - Sealing SealingConfig - Storage sectorstorage.SealerConfig - Fees MinerFeeConfig - Addresses MinerAddressConfig -} - -type MinerSubsystemConfig struct { - EnableMining bool - EnableSealing bool - EnableSectorStorage bool - EnableMarkets bool - - SealerApiInfo string // if EnableSealing == false - SectorIndexApiInfo string // if EnableSectorStorage == false -} - -type DealmakingConfig struct { - ConsiderOnlineStorageDeals bool - ConsiderOfflineStorageDeals bool - ConsiderOnlineRetrievalDeals bool - ConsiderOfflineRetrievalDeals bool - ConsiderVerifiedStorageDeals bool - ConsiderUnverifiedStorageDeals bool - PieceCidBlocklist []cid.Cid - ExpectedSealDuration Duration - // Maximum amount of time proposed deal StartEpoch can be in future - MaxDealStartDelay Duration - // The amount of time to wait for more deals to arrive before - // publishing - PublishMsgPeriod Duration - // The maximum number of deals to include in a single PublishStorageDeals - // message - MaxDealsPerPublishMsg uint64 - // The maximum collateral that the provider will put up against a deal, - // as a multiplier of the minimum collateral bound - MaxProviderCollateralMultiplier uint64 - - // The maximum number of parallel online data transfers (storage+retrieval) - SimultaneousTransfers uint64 - - Filter string - RetrievalFilter string - - RetrievalPricing *RetrievalPricing -} - -type RetrievalPricing struct { - Strategy string // possible values: "default", "external" - - Default *RetrievalPricingDefault - External *RetrievalPricingExternal -} - -type RetrievalPricingExternal struct { - // Path of the external script that will be run to price a retrieval deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". - Path string -} - -type RetrievalPricingDefault struct { - // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal - // of a payloadCid that belongs to a verified storage deal. - // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". 
- // default value is true - VerifiedDealsFreeTransfer bool -} - -type SealingConfig struct { - // 0 = no limit - MaxWaitDealsSectors uint64 - - // includes failed, 0 = no limit - MaxSealingSectors uint64 - - // includes failed, 0 = no limit - MaxSealingSectorsForDeals uint64 - - WaitDealsDelay Duration - - // CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will - // live before it must be extended or converted into sector containing deals before it is - // terminated. Value must be between 180-540 days inclusive. - CommittedCapacitySectorLifetime Duration - - AlwaysKeepUnsealedCopy bool - - // Run sector finalization before submitting sector proof to the chain - FinalizeEarly bool - - // Whether to use available miner balance for sector collateral instead of sending it with each message - CollateralFromMinerBalance bool - // Minimum available balance to keep in the miner actor before sending it with messages - AvailableBalanceBuffer types.FIL - // Don't send collateral with messages even if there is no available balance in the miner actor - DisableCollateralFallback bool - - // enable / disable precommit batching (takes effect after nv13) - BatchPreCommits bool - // maximum precommit batch size - batches will be sent immediately above this size - MaxPreCommitBatch int - // how long to wait before submitting a batch after crossing the minimum batch size - PreCommitBatchWait Duration - // time buffer for forceful batch submission before sectors/deal in batch would start expiring - PreCommitBatchSlack Duration - - // enable / disable commit aggregation (takes effect after nv13) - AggregateCommits bool - // maximum batched commit size - batches will be sent immediately above this size - MinCommitBatch int - MaxCommitBatch int - // how long to wait before submitting a batch after crossing the minimum batch size - CommitBatchWait Duration - // time buffer for forceful batch submission before sectors/deals in batch would start expiring - CommitBatchSlack Duration - - // network BaseFee below which to stop doing commit aggregation, instead - // submitting proofs to the chain individually - AggregateAboveBaseFee types.FIL - - TerminateBatchMax uint64 - TerminateBatchMin uint64 - TerminateBatchWait Duration - - // Keep this many sectors in sealing pipeline, start CC if needed - // todo TargetSealingSectors uint64 - - // todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above -} - -type BatchFeeConfig struct { - Base types.FIL - PerSector types.FIL -} - func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount { return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector))) } -type MinerFeeConfig struct { - MaxPreCommitGasFee types.FIL - MaxCommitGasFee types.FIL - - // maxBatchFee = maxBase + maxPerSector * nSectors - MaxPreCommitBatchGasFee BatchFeeConfig - MaxCommitBatchGasFee BatchFeeConfig - - MaxTerminateGasFee types.FIL - MaxWindowPoStGasFee types.FIL - MaxPublishDealsFee types.FIL - MaxMarketBalanceAddFee types.FIL -} - -type MinerAddressConfig struct { - PreCommitControl []string - CommitControl []string - TerminateControl []string - DealPublishControl []string - - // DisableOwnerFallback disables usage of the owner address for messages - // sent automatically - DisableOwnerFallback bool - // DisableWorkerFallback disables usage of the worker address for messages - // sent automatically, if control addresses are configured. 
- // A control address that doesn't have enough funds will still be chosen - // over the worker address if this flag is set. - DisableWorkerFallback bool -} - -// API contains configs for API endpoint -type API struct { - ListenAddress string - RemoteListenAddress string - Timeout Duration -} - -// Libp2p contains configs for libp2p -type Libp2p struct { - ListenAddresses []string - AnnounceAddresses []string - NoAnnounceAddresses []string - BootstrapPeers []string - ProtectedPeers []string - - ConnMgrLow uint - ConnMgrHigh uint - ConnMgrGrace Duration -} - -type Pubsub struct { - Bootstrapper bool - DirectPeers []string - IPColocationWhitelist []string - RemoteTracer string -} - -type Chainstore struct { - EnableSplitstore bool - Splitstore Splitstore -} - -type Splitstore struct { - ColdStoreType string - HotStoreType string - MarkSetType string - - HotStoreMessageRetention uint64 -} - -// // Full Node - -type Metrics struct { - Nickname string - HeadNotifs bool -} - -type Client struct { - UseIpfs bool - IpfsOnlineMode bool - IpfsMAddr string - IpfsUseForRetrieval bool - SimultaneousTransfers uint64 -} - -type Wallet struct { - RemoteBackend string - EnableLedger bool - DisableLocal bool -} - -type FeeConfig struct { - DefaultMaxFee types.FIL -} - func defCommon() Common { return Common{ API: API{ diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go new file mode 100644 index 000000000..ea68dc344 --- /dev/null +++ b/node/config/doc_gen.go @@ -0,0 +1,767 @@ +// Code generated by github.com/filecoin-project/lotus/node/config/cfgdocgen. DO NOT EDIT. + +package config + +type DocField struct { + Name string + Type string + Comment string +} + +var Doc = map[string][]DocField{ + "API": []DocField{ + { + Name: "ListenAddress", + Type: "string", + + Comment: `Binding address for the Lotus API`, + }, + { + Name: "RemoteListenAddress", + Type: "string", + + Comment: ``, + }, + { + Name: "Timeout", + Type: "Duration", + + Comment: ``, + }, + }, + "Backup": []DocField{ + { + Name: "DisableMetadataLog", + Type: "bool", + + Comment: `Note that in case of metadata corruption it might be much harder to recover +your node if metadata log is disabled`, + }, + }, + "BatchFeeConfig": []DocField{ + { + Name: "Base", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "PerSector", + Type: "types.FIL", + + Comment: ``, + }, + }, + "Chainstore": []DocField{ + { + Name: "EnableSplitstore", + Type: "bool", + + Comment: ``, + }, + { + Name: "Splitstore", + Type: "Splitstore", + + Comment: ``, + }, + }, + "Client": []DocField{ + { + Name: "UseIpfs", + Type: "bool", + + Comment: ``, + }, + { + Name: "IpfsOnlineMode", + Type: "bool", + + Comment: ``, + }, + { + Name: "IpfsMAddr", + Type: "string", + + Comment: ``, + }, + { + Name: "IpfsUseForRetrieval", + Type: "bool", + + Comment: ``, + }, + { + Name: "SimultaneousTransfers", + Type: "uint64", + + Comment: `The maximum number of simultaneous data transfers between the client +and storage providers`, + }, + }, + "Common": []DocField{ + { + Name: "API", + Type: "API", + + Comment: ``, + }, + { + Name: "Backup", + Type: "Backup", + + Comment: ``, + }, + { + Name: "Libp2p", + Type: "Libp2p", + + Comment: ``, + }, + { + Name: "Pubsub", + Type: "Pubsub", + + Comment: ``, + }, + }, + "DealmakingConfig": []DocField{ + { + Name: "ConsiderOnlineStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept online deals`, + }, + { + Name: "ConsiderOfflineStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept offline 
deals`, + }, + { + Name: "ConsiderOnlineRetrievalDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept retrieval deals`, + }, + { + Name: "ConsiderOfflineRetrievalDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept offline retrieval deals`, + }, + { + Name: "ConsiderVerifiedStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept verified deals`, + }, + { + Name: "ConsiderUnverifiedStorageDeals", + Type: "bool", + + Comment: `When enabled, the miner can accept unverified deals`, + }, + { + Name: "PieceCidBlocklist", + Type: "[]cid.Cid", + + Comment: `A list of Data CIDs to reject when making deals`, + }, + { + Name: "ExpectedSealDuration", + Type: "Duration", + + Comment: `Maximum expected amount of time getting the deal into a sealed sector will take +This includes the time the deal will need to get transferred and published +before being assigned to a sector`, + }, + { + Name: "MaxDealStartDelay", + Type: "Duration", + + Comment: `Maximum amount of time proposed deal StartEpoch can be in future`, + }, + { + Name: "PublishMsgPeriod", + Type: "Duration", + + Comment: `When a deal is ready to publish, the amount of time to wait for more +deals to be ready to publish before publishing them all as a batch`, + }, + { + Name: "MaxDealsPerPublishMsg", + Type: "uint64", + + Comment: `The maximum number of deals to include in a single PublishStorageDeals +message`, + }, + { + Name: "MaxProviderCollateralMultiplier", + Type: "uint64", + + Comment: `The maximum collateral that the provider will put up against a deal, +as a multiplier of the minimum collateral bound`, + }, + { + Name: "SimultaneousTransfers", + Type: "uint64", + + Comment: `The maximum number of parallel online data transfers (storage+retrieval)`, + }, + { + Name: "Filter", + Type: "string", + + Comment: `A command used for fine-grained evaluation of storage deals +see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, + }, + { + Name: "RetrievalFilter", + Type: "string", + + Comment: `A command used for fine-grained evaluation of retrieval deals +see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`, + }, + { + Name: "RetrievalPricing", + Type: "*RetrievalPricing", + + Comment: ``, + }, + }, + "FeeConfig": []DocField{ + { + Name: "DefaultMaxFee", + Type: "types.FIL", + + Comment: ``, + }, + }, + "FullNode": []DocField{ + { + Name: "Client", + Type: "Client", + + Comment: ``, + }, + { + Name: "Metrics", + Type: "Metrics", + + Comment: ``, + }, + { + Name: "Wallet", + Type: "Wallet", + + Comment: ``, + }, + { + Name: "Fees", + Type: "FeeConfig", + + Comment: ``, + }, + { + Name: "Chainstore", + Type: "Chainstore", + + Comment: ``, + }, + }, + "Libp2p": []DocField{ + { + Name: "ListenAddresses", + Type: "[]string", + + Comment: `Binding address for the libp2p host - 0 means random port. +Format: multiaddress; see https://multiformats.io/multiaddr/`, + }, + { + Name: "AnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to explicitally announce to other peers. 
If not specified, +all interface addresses are announced +Format: multiaddress`, + }, + { + Name: "NoAnnounceAddresses", + Type: "[]string", + + Comment: `Addresses to not announce +Format: multiaddress`, + }, + { + Name: "BootstrapPeers", + Type: "[]string", + + Comment: ``, + }, + { + Name: "ProtectedPeers", + Type: "[]string", + + Comment: ``, + }, + { + Name: "ConnMgrLow", + Type: "uint", + + Comment: ``, + }, + { + Name: "ConnMgrHigh", + Type: "uint", + + Comment: ``, + }, + { + Name: "ConnMgrGrace", + Type: "Duration", + + Comment: ``, + }, + }, + "Metrics": []DocField{ + { + Name: "Nickname", + Type: "string", + + Comment: ``, + }, + { + Name: "HeadNotifs", + Type: "bool", + + Comment: ``, + }, + }, + "MinerAddressConfig": []DocField{ + { + Name: "PreCommitControl", + Type: "[]string", + + Comment: `Addresses to send PreCommit messages from`, + }, + { + Name: "CommitControl", + Type: "[]string", + + Comment: `Addresses to send Commit messages from`, + }, + { + Name: "TerminateControl", + Type: "[]string", + + Comment: ``, + }, + { + Name: "DealPublishControl", + Type: "[]string", + + Comment: ``, + }, + { + Name: "DisableOwnerFallback", + Type: "bool", + + Comment: `DisableOwnerFallback disables usage of the owner address for messages +sent automatically`, + }, + { + Name: "DisableWorkerFallback", + Type: "bool", + + Comment: `DisableWorkerFallback disables usage of the worker address for messages +sent automatically, if control addresses are configured. +A control address that doesn't have enough funds will still be chosen +over the worker address if this flag is set.`, + }, + }, + "MinerFeeConfig": []DocField{ + { + Name: "MaxPreCommitGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxCommitGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxPreCommitBatchGasFee", + Type: "BatchFeeConfig", + + Comment: `maxBatchFee = maxBase + maxPerSector * nSectors`, + }, + { + Name: "MaxCommitBatchGasFee", + Type: "BatchFeeConfig", + + Comment: ``, + }, + { + Name: "MaxTerminateGasFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxWindowPoStGasFee", + Type: "types.FIL", + + Comment: `WindowPoSt is a high-value operation, so the default fee should be high.`, + }, + { + Name: "MaxPublishDealsFee", + Type: "types.FIL", + + Comment: ``, + }, + { + Name: "MaxMarketBalanceAddFee", + Type: "types.FIL", + + Comment: ``, + }, + }, + "MinerSubsystemConfig": []DocField{ + { + Name: "EnableMining", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableSealing", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableSectorStorage", + Type: "bool", + + Comment: ``, + }, + { + Name: "EnableMarkets", + Type: "bool", + + Comment: ``, + }, + { + Name: "SealerApiInfo", + Type: "string", + + Comment: ``, + }, + { + Name: "SectorIndexApiInfo", + Type: "string", + + Comment: ``, + }, + }, + "Pubsub": []DocField{ + { + Name: "Bootstrapper", + Type: "bool", + + Comment: `Run the node in bootstrap-node mode`, + }, + { + Name: "DirectPeers", + Type: "[]string", + + Comment: `DirectPeers specifies peers with direct peering agreements. These peers are +connected outside of the mesh, with all (valid) message unconditionally +forwarded to them. The router will maintain open connections to these peers. +Note that the peering agreement should be reciprocal with direct peers +symmetrically configured at both ends. 
+Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K...`, + }, + { + Name: "IPColocationWhitelist", + Type: "[]string", + + Comment: ``, + }, + { + Name: "RemoteTracer", + Type: "string", + + Comment: ``, + }, + }, + "RetrievalPricing": []DocField{ + { + Name: "Strategy", + Type: "string", + + Comment: ``, + }, + { + Name: "Default", + Type: "*RetrievalPricingDefault", + + Comment: ``, + }, + { + Name: "External", + Type: "*RetrievalPricingExternal", + + Comment: ``, + }, + }, + "RetrievalPricingDefault": []DocField{ + { + Name: "VerifiedDealsFreeTransfer", + Type: "bool", + + Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal +of a payloadCid that belongs to a verified storage deal. +This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". +default value is true`, + }, + }, + "RetrievalPricingExternal": []DocField{ + { + Name: "Path", + Type: "string", + + Comment: `Path of the external script that will be run to price a retrieval deal. +This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`, + }, + }, + "SealingConfig": []DocField{ + { + Name: "MaxWaitDealsSectors", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time. +If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created. +If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel +Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency +0 = no limit`, + }, + { + Name: "MaxSealingSectors", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be sealing at the same time when creating new CC sectors (0 = unlimited)`, + }, + { + Name: "MaxSealingSectorsForDeals", + Type: "uint64", + + Comment: `Upper bound on how many sectors can be sealing at the same time when creating new sectors with deals (0 = unlimited)`, + }, + { + Name: "WaitDealsDelay", + Type: "Duration", + + Comment: `Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. +Sectors which are fully filled will start sealing immediately`, + }, + { + Name: "AlwaysKeepUnsealedCopy", + Type: "bool", + + Comment: `Whether to keep unsealed copies of deal data regardless of whether the client requested that. 
This lets the miner +avoid the relatively high cost of unsealing the data later, at the cost of more storage space`, + }, + { + Name: "FinalizeEarly", + Type: "bool", + + Comment: `Run sector finalization before submitting sector proof to the chain`, + }, + { + Name: "CollateralFromMinerBalance", + Type: "bool", + + Comment: `Whether to use available miner balance for sector collateral instead of sending it with each message`, + }, + { + Name: "AvailableBalanceBuffer", + Type: "types.FIL", + + Comment: `Minimum available balance to keep in the miner actor before sending it with messages`, + }, + { + Name: "DisableCollateralFallback", + Type: "bool", + + Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`, + }, + { + Name: "BatchPreCommits", + Type: "bool", + + Comment: `enable / disable precommit batching (takes effect after nv13)`, + }, + { + Name: "MaxPreCommitBatch", + Type: "int", + + Comment: `maximum precommit batch size - batches will be sent immediately above this size`, + }, + { + Name: "PreCommitBatchWait", + Type: "Duration", + + Comment: `how long to wait before submitting a batch after crossing the minimum batch size`, + }, + { + Name: "PreCommitBatchSlack", + Type: "Duration", + + Comment: `time buffer for forceful batch submission before sectors/deal in batch would start expiring`, + }, + { + Name: "AggregateCommits", + Type: "bool", + + Comment: `enable / disable commit aggregation (takes effect after nv13)`, + }, + { + Name: "MinCommitBatch", + Type: "int", + + Comment: `maximum batched commit size - batches will be sent immediately above this size`, + }, + { + Name: "MaxCommitBatch", + Type: "int", + + Comment: ``, + }, + { + Name: "CommitBatchWait", + Type: "Duration", + + Comment: `how long to wait before submitting a batch after crossing the minimum batch size`, + }, + { + Name: "CommitBatchSlack", + Type: "Duration", + + Comment: `time buffer for forceful batch submission before sectors/deals in batch would start expiring`, + }, + { + Name: "AggregateAboveBaseFee", + Type: "types.FIL", + + Comment: `network BaseFee below which to stop doing commit aggregation, instead +submitting proofs to the chain individually`, + }, + { + Name: "TerminateBatchMax", + Type: "uint64", + + Comment: ``, + }, + { + Name: "TerminateBatchMin", + Type: "uint64", + + Comment: ``, + }, + { + Name: "TerminateBatchWait", + Type: "Duration", + + Comment: ``, + }, + }, + "Splitstore": []DocField{ + { + Name: "ColdStoreType", + Type: "string", + + Comment: ``, + }, + { + Name: "HotStoreType", + Type: "string", + + Comment: ``, + }, + { + Name: "MarkSetType", + Type: "string", + + Comment: ``, + }, + { + Name: "HotStoreMessageRetention", + Type: "uint64", + + Comment: ``, + }, + }, + "StorageMiner": []DocField{ + { + Name: "Subsystems", + Type: "MinerSubsystemConfig", + + Comment: ``, + }, + { + Name: "Dealmaking", + Type: "DealmakingConfig", + + Comment: ``, + }, + { + Name: "Sealing", + Type: "SealingConfig", + + Comment: ``, + }, + { + Name: "Storage", + Type: "sectorstorage.SealerConfig", + + Comment: ``, + }, + { + Name: "Fees", + Type: "MinerFeeConfig", + + Comment: ``, + }, + { + Name: "Addresses", + Type: "MinerAddressConfig", + + Comment: ``, + }, + }, + "Wallet": []DocField{ + { + Name: "RemoteBackend", + Type: "string", + + Comment: ``, + }, + { + Name: "EnableLedger", + Type: "bool", + + Comment: ``, + }, + { + Name: "DisableLocal", + Type: "bool", + + Comment: ``, + }, + }, +} diff --git a/node/config/doc_util.go 
b/node/config/doc_util.go new file mode 100644 index 000000000..ee70a9cfd --- /dev/null +++ b/node/config/doc_util.go @@ -0,0 +1,44 @@ +package config + +import ( + "fmt" + "strings" +) + +func findDoc(root interface{}, section, name string) *DocField { + rt := fmt.Sprintf("%T", root)[len("*config."):] + + doc := findDocSect(rt, section, name) + if doc != nil { + return doc + } + + return findDocSect("Common", section, name) +} + +func findDocSect(root string, section, name string) *DocField { + path := strings.Split(section, ".") + + docSection := Doc[root] + for _, e := range path { + if docSection == nil { + return nil + } + + for _, field := range docSection { + if field.Name == e { + docSection = Doc[field.Type] + break + } + + } + } + + for _, df := range docSection { + if df.Name == name { + return &df + } + } + + return nil +} diff --git a/node/config/load.go b/node/config/load.go index 61e6e8f97..082106044 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -5,6 +5,10 @@ import ( "fmt" "io" "os" + "reflect" + "regexp" + "strings" + "unicode" "github.com/BurntSushi/toml" "github.com/kelseyhightower/envconfig" @@ -42,15 +46,116 @@ func FromReader(reader io.Reader, def interface{}) (interface{}, error) { return cfg, nil } -func ConfigComment(t interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - _, _ = buf.WriteString("# Default config:\n") - e := toml.NewEncoder(buf) - if err := e.Encode(t); err != nil { - return nil, xerrors.Errorf("encoding config: %w", err) +func ConfigUpdate(cfgCur, cfgDef interface{}, comment bool) ([]byte, error) { + var nodeStr, defStr string + if cfgDef != nil { + buf := new(bytes.Buffer) + e := toml.NewEncoder(buf) + if err := e.Encode(cfgDef); err != nil { + return nil, xerrors.Errorf("encoding default config: %w", err) + } + + defStr = buf.String() } - b := buf.Bytes() - b = bytes.ReplaceAll(b, []byte("\n"), []byte("\n#")) - b = bytes.ReplaceAll(b, []byte("#["), []byte("[")) - return b, nil + + { + buf := new(bytes.Buffer) + e := toml.NewEncoder(buf) + if err := e.Encode(cfgCur); err != nil { + return nil, xerrors.Errorf("encoding node config: %w", err) + } + + nodeStr = buf.String() + } + + if comment { + // create a map of default lines so we can comment those out later + defLines := strings.Split(defStr, "\n") + defaults := map[string]struct{}{} + for i := range defLines { + l := strings.TrimSpace(defLines[i]) + if len(l) == 0 { + continue + } + if l[0] == '#' || l[0] == '[' { + continue + } + defaults[l] = struct{}{} + } + + nodeLines := strings.Split(nodeStr, "\n") + var outLines []string + + sectionRx := regexp.MustCompile(`\[(.+)]`) + var section string + + for i, line := range nodeLines { + // if this is a section, track it + trimmed := strings.TrimSpace(line) + if len(trimmed) > 0 { + if trimmed[0] == '[' { + m := sectionRx.FindSubmatch([]byte(trimmed)) + if len(m) != 2 { + return nil, xerrors.Errorf("section didn't match (line %d)", i) + } + section = string(m[1]) + + // never comment sections + outLines = append(outLines, line) + continue + } + } + + pad := strings.Repeat(" ", len(line)-len(strings.TrimLeftFunc(line, unicode.IsSpace))) + + // see if we have docs for this field + { + lf := strings.Fields(line) + if len(lf) > 1 { + doc := findDoc(cfgCur, section, lf[0]) + + if doc != nil { + // found docfield, emit doc comment + if len(doc.Comment) > 0 { + for _, docLine := range strings.Split(doc.Comment, "\n") { + outLines = append(outLines, pad+"# "+docLine) + } + outLines = append(outLines, pad+"#") + } + + outLines = 
append(outLines, pad+"# type: "+doc.Type) + } + } + } + + // if there is the same line in the default config, comment it out it output + if _, found := defaults[strings.TrimSpace(nodeLines[i])]; (cfgDef == nil || found) && len(line) > 0 { + line = pad + "#" + line[len(pad):] + } + outLines = append(outLines, line) + if len(line) > 0 { + outLines = append(outLines, "") + } + } + + nodeStr = strings.Join(outLines, "\n") + } + + // sanity-check that the updated config parses the same way as the current one + if cfgDef != nil { + cfgUpdated, err := FromReader(strings.NewReader(nodeStr), cfgDef) + if err != nil { + return nil, xerrors.Errorf("parsing updated config: %w", err) + } + + if !reflect.DeepEqual(cfgCur, cfgUpdated) { + return nil, xerrors.Errorf("updated config didn't match current config") + } + } + + return []byte(nodeStr), nil +} + +func ConfigComment(t interface{}) ([]byte, error) { + return ConfigUpdate(t, nil, true) } diff --git a/node/config/types.go b/node/config/types.go new file mode 100644 index 000000000..32795c5b9 --- /dev/null +++ b/node/config/types.go @@ -0,0 +1,318 @@ +package config + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/types" + sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" +) + +// // NOTE: ONLY PUT STRUCT DEFINITIONS IN THIS FILE +// // +// // After making edits here, run 'make cfgdoc-gen' (or 'make gen') + +// Common is common config between full node and miner +type Common struct { + API API + Backup Backup + Libp2p Libp2p + Pubsub Pubsub +} + +// FullNode is a full node config +type FullNode struct { + Common + Client Client + Metrics Metrics + Wallet Wallet + Fees FeeConfig + Chainstore Chainstore +} + +// // Common + +type Backup struct { + // When set to true disables metadata log (.lotus/kvlog). This can save disk + // space by reducing metadata redundancy. 
+ // + // Note that in case of metadata corruption it might be much harder to recover + // your node if metadata log is disabled + DisableMetadataLog bool +} + +// StorageMiner is a miner config +type StorageMiner struct { + Common + + Subsystems MinerSubsystemConfig + Dealmaking DealmakingConfig + Sealing SealingConfig + Storage sectorstorage.SealerConfig + Fees MinerFeeConfig + Addresses MinerAddressConfig +} + +type MinerSubsystemConfig struct { + EnableMining bool + EnableSealing bool + EnableSectorStorage bool + EnableMarkets bool + + SealerApiInfo string // if EnableSealing == false + SectorIndexApiInfo string // if EnableSectorStorage == false +} + +type DealmakingConfig struct { + // When enabled, the miner can accept online deals + ConsiderOnlineStorageDeals bool + // When enabled, the miner can accept offline deals + ConsiderOfflineStorageDeals bool + // When enabled, the miner can accept retrieval deals + ConsiderOnlineRetrievalDeals bool + // When enabled, the miner can accept offline retrieval deals + ConsiderOfflineRetrievalDeals bool + // When enabled, the miner can accept verified deals + ConsiderVerifiedStorageDeals bool + // When enabled, the miner can accept unverified deals + ConsiderUnverifiedStorageDeals bool + // A list of Data CIDs to reject when making deals + PieceCidBlocklist []cid.Cid + // Maximum expected amount of time getting the deal into a sealed sector will take + // This includes the time the deal will need to get transferred and published + // before being assigned to a sector + ExpectedSealDuration Duration + // Maximum amount of time proposed deal StartEpoch can be in future + MaxDealStartDelay Duration + // When a deal is ready to publish, the amount of time to wait for more + // deals to be ready to publish before publishing them all as a batch + PublishMsgPeriod Duration + // The maximum number of deals to include in a single PublishStorageDeals + // message + MaxDealsPerPublishMsg uint64 + // The maximum collateral that the provider will put up against a deal, + // as a multiplier of the minimum collateral bound + MaxProviderCollateralMultiplier uint64 + + // The maximum number of parallel online data transfers (storage+retrieval) + SimultaneousTransfers uint64 + + // A command used for fine-grained evaluation of storage deals + // see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details + Filter string + // A command used for fine-grained evaluation of retrieval deals + // see https://docs.filecoin.io/mine/lotus/miner-configuration/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details + RetrievalFilter string + + RetrievalPricing *RetrievalPricing +} + +type RetrievalPricing struct { + Strategy string // possible values: "default", "external" + + Default *RetrievalPricingDefault + External *RetrievalPricingExternal +} + +type RetrievalPricingExternal struct { + // Path of the external script that will be run to price a retrieval deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external". + Path string +} + +type RetrievalPricingDefault struct { + // VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal + // of a payloadCid that belongs to a verified storage deal. + // This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default". 
+ // default value is true + VerifiedDealsFreeTransfer bool +} + +type SealingConfig struct { + // Upper bound on how many sectors can be waiting for more deals to be packed in it before it begins sealing at any given time. + // If the miner is accepting multiple deals in parallel, up to MaxWaitDealsSectors of new sectors will be created. + // If more than MaxWaitDealsSectors deals are accepted in parallel, only MaxWaitDealsSectors deals will be processed in parallel + // Note that setting this number too high in relation to deal ingestion rate may result in poor sector packing efficiency + // 0 = no limit + MaxWaitDealsSectors uint64 + + // Upper bound on how many sectors can be sealing at the same time when creating new CC sectors (0 = unlimited) + MaxSealingSectors uint64 + + // Upper bound on how many sectors can be sealing at the same time when creating new sectors with deals (0 = unlimited) + MaxSealingSectorsForDeals uint64 + + // CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will + // live before it must be extended or converted into sector containing deals before it is + // terminated. Value must be between 180-540 days inclusive + CommittedCapacitySectorLifetime Duration + + // Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. + // Sectors which are fully filled will start sealing immediately + WaitDealsDelay Duration + + // Whether to keep unsealed copies of deal data regardless of whether the client requested that. This lets the miner + // avoid the relatively high cost of unsealing the data later, at the cost of more storage space + AlwaysKeepUnsealedCopy bool + + // Run sector finalization before submitting sector proof to the chain + FinalizeEarly bool + + // Whether to use available miner balance for sector collateral instead of sending it with each message + CollateralFromMinerBalance bool + // Minimum available balance to keep in the miner actor before sending it with messages + AvailableBalanceBuffer types.FIL + // Don't send collateral with messages even if there is no available balance in the miner actor + DisableCollateralFallback bool + + // enable / disable precommit batching (takes effect after nv13) + BatchPreCommits bool + // maximum precommit batch size - batches will be sent immediately above this size + MaxPreCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + PreCommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deal in batch would start expiring + PreCommitBatchSlack Duration + + // enable / disable commit aggregation (takes effect after nv13) + AggregateCommits bool + // maximum batched commit size - batches will be sent immediately above this size + MinCommitBatch int + MaxCommitBatch int + // how long to wait before submitting a batch after crossing the minimum batch size + CommitBatchWait Duration + // time buffer for forceful batch submission before sectors/deals in batch would start expiring + CommitBatchSlack Duration + + // network BaseFee below which to stop doing commit aggregation, instead + // submitting proofs to the chain individually + AggregateAboveBaseFee types.FIL + + TerminateBatchMax uint64 + TerminateBatchMin uint64 + TerminateBatchWait Duration + + // Keep this many sectors in sealing pipeline, start CC if needed + // todo TargetSealingSectors uint64 + + // todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC 
upgrade for deals sectors if above +} + +type BatchFeeConfig struct { + Base types.FIL + PerSector types.FIL +} + +type MinerFeeConfig struct { + MaxPreCommitGasFee types.FIL + MaxCommitGasFee types.FIL + + // maxBatchFee = maxBase + maxPerSector * nSectors + MaxPreCommitBatchGasFee BatchFeeConfig + MaxCommitBatchGasFee BatchFeeConfig + + MaxTerminateGasFee types.FIL + // WindowPoSt is a high-value operation, so the default fee should be high. + MaxWindowPoStGasFee types.FIL + MaxPublishDealsFee types.FIL + MaxMarketBalanceAddFee types.FIL +} + +type MinerAddressConfig struct { + // Addresses to send PreCommit messages from + PreCommitControl []string + // Addresses to send Commit messages from + CommitControl []string + TerminateControl []string + DealPublishControl []string + + // DisableOwnerFallback disables usage of the owner address for messages + // sent automatically + DisableOwnerFallback bool + // DisableWorkerFallback disables usage of the worker address for messages + // sent automatically, if control addresses are configured. + // A control address that doesn't have enough funds will still be chosen + // over the worker address if this flag is set. + DisableWorkerFallback bool +} + +// API contains configs for API endpoint +type API struct { + // Binding address for the Lotus API + ListenAddress string + RemoteListenAddress string + Timeout Duration +} + +// Libp2p contains configs for libp2p +type Libp2p struct { + // Binding address for the libp2p host - 0 means random port. + // Format: multiaddress; see https://multiformats.io/multiaddr/ + ListenAddresses []string + // Addresses to explicitally announce to other peers. If not specified, + // all interface addresses are announced + // Format: multiaddress + AnnounceAddresses []string + // Addresses to not announce + // Format: multiaddress + NoAnnounceAddresses []string + BootstrapPeers []string + ProtectedPeers []string + + ConnMgrLow uint + ConnMgrHigh uint + ConnMgrGrace Duration +} + +type Pubsub struct { + // Run the node in bootstrap-node mode + Bootstrapper bool + // DirectPeers specifies peers with direct peering agreements. These peers are + // connected outside of the mesh, with all (valid) message unconditionally + // forwarded to them. The router will maintain open connections to these peers. + // Note that the peering agreement should be reciprocal with direct peers + // symmetrically configured at both ends. + // Type: Array of multiaddress peerinfo strings, must include peerid (/p2p/12D3K... + DirectPeers []string + IPColocationWhitelist []string + RemoteTracer string +} + +type Chainstore struct { + EnableSplitstore bool + Splitstore Splitstore +} + +type Splitstore struct { + ColdStoreType string + HotStoreType string + MarkSetType string + + HotStoreMessageRetention uint64 +} + +// // Full Node + +type Metrics struct { + Nickname string + HeadNotifs bool +} + +type Client struct { + UseIpfs bool + IpfsOnlineMode bool + IpfsMAddr string + IpfsUseForRetrieval bool + // The maximum number of simultaneous data transfers between the client + // and storage providers + SimultaneousTransfers uint64 +} + +type Wallet struct { + RemoteBackend string + EnableLedger bool + DisableLocal bool +} + +type FeeConfig struct { + DefaultMaxFee types.FIL +} diff --git a/scripts/docker-lotus-entrypoint.sh b/scripts/docker-lotus-entrypoint.sh new file mode 100755 index 000000000..308a4b6eb --- /dev/null +++ b/scripts/docker-lotus-entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +if [ ! 
-z "$DOCKER_LOTUS_IMPORT_SNAPSHOT" ]; then + GATE="$LOTUS_PATH"/date_initialized + # Don't init if already initialized. + if [ ! -f "$GATE" ]; then + echo importing minimal snapshot + /usr/local/bin/lotus daemon --import-snapshot "$DOCKER_LOTUS_IMPORT_SNAPSHOT" --halt-after-import + # Block future inits + date > "$GATE" + fi +fi + +# import wallet, if provided +if [ ! -z "$DOCKER_LOTUS_IMPORT_WALLET" ]; then + /usr/local/bin/lotus-shed keyinfo import "$DOCKER_LOTUS_IMPORT_WALLET" +fi + +exec /usr/local/bin/lotus "$@" diff --git a/scripts/docker-lotus-miner-entrypoint.sh b/scripts/docker-lotus-miner-entrypoint.sh new file mode 100755 index 000000000..1cb153176 --- /dev/null +++ b/scripts/docker-lotus-miner-entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +if [ ! -z "$DOCKER_LOTUS_MINER_INIT" ]; then + GATE="$LOTUS_PATH"/date_initialized + + # Don't init if already initialized. + if [ -f "$GATE" ]; then + echo lotus-miner already initialized. + exit 0 + fi + + echo starting init + /usr/local/bin/lotus-miner init + + # Block future inits + date > "$GATE" +fi + +exec /usr/local/bin/lotus-miner "$@"
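
For illustration, the `config.ConfigUpdate` helper introduced in this patch (which both `lotus config updated` and `lotus-miner config updated` build on) can also be driven directly. A minimal sketch; the standalone `main` wrapper and the tweaked `SimultaneousTransfers` value are illustrative assumptions, not part of the patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	// Start from two independent copies of the default full-node config.
	def := config.DefaultFullNode()
	cur := config.DefaultFullNode()

	// Pretend the operator changed one field away from its default
	// (the value 64 is arbitrary, for illustration only).
	cur.Client.SimultaneousTransfers = 64

	// Render the current config: fields still matching the default are
	// commented out, and each field is annotated with the doc comment and
	// type recorded in the generated Doc map.
	out, err := config.ConfigUpdate(cur, def, true)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(out))
}
```

In the rendered output, lines equal to the default stay commented while the changed field remains active; `ConfigComment` itself is now just a thin wrapper around `ConfigUpdate(t, nil, true)`.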