v1.27.0-a #10
diff --git a/.circleci/config.yml b/.circleci/config.yml
@@ -392,14 +392,8 @@ jobs:
       - attach_workspace:
           at: ~/
       - run: go install golang.org/x/tools/cmd/goimports
-      - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
-      - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
-      - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
       - run: make docsgen
-      - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
-      - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
-      - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
-      - run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
+      - run: git --no-pager diff && git --no-pager diff --quiet

   lint-all:
     description: |
@@ -560,6 +554,12 @@ workflows:
             - build
           suite: itest-cli
           target: "./itests/cli_test.go"
+      - test:
+          name: test-itest-curio
+          requires:
+            - build
+          suite: itest-curio
+          target: "./itests/curio_test.go"
       - test:
           name: test-itest-deadlines
           requires:
@@ -891,12 +891,6 @@ workflows:
             - build
           suite: itest-pending_deal_allocation
           target: "./itests/pending_deal_allocation_test.go"
-      - test:
-          name: test-itest-raft_messagesigner
-          requires:
-            - build
-          suite: itest-raft_messagesigner
-          target: "./itests/raft_messagesigner_test.go"
       - test:
           name: test-itest-remove_verifreg_datacap
           requires:
@@ -1043,7 +1037,7 @@ workflows:
           requires:
             - build
           suite: utest-unit-rest
-          target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
+          target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./curiosrc/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
           resource_class: 2xlarge
       - test:
           name: test-unit-storage
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
@@ -1,6 +1,3 @@
 # Reference
 # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners

-# Global owners
-# Ensure maintainers team is a requested reviewer for non-draft PRs
-* @filecoin-project/lotus-maintainers
diff --git a/.github/actions/export-circle-env/action.yml b/.github/actions/export-circle-env/action.yml
new file mode 100644
@@ -0,0 +1,14 @@
+name: Export Circle Env
+description: Export CircleCI environment variables for Filecoin Lotus
+
+runs:
+  using: composite
+  steps:
+    - run: |
+        if [[ "$GITHUB_REF" == refs/tags/* ]]; then
+          echo "CIRCLE_TAG=${GITHUB_REF#refs/tags/}" | tee -a $GITHUB_ENV
+        fi
+        echo "CIRCLE_PROJECT_USERNAME=$GITHUB_REPOSITORY_OWNER" | tee -a $GITHUB_ENV
+        echo "CIRCLE_PROJECT_REPONAME=${GITHUB_REPOSITORY#$GITHUB_REPOSITORY_OWNER/}" | tee -a $GITHUB_ENV
+        echo "CIRCLE_SHA1=$GITHUB_SHA" | tee -a $GITHUB_ENV
+      shell: bash
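This composite action re-derives CircleCI's conventional variables from the GitHub Actions context, so scripts written against CircleCI keep working. A quick local sketch of the mapping (shell only; the example values are illustrative, not taken from this diff):

```bash
# Hypothetical inputs, as GitHub Actions would set them for a tag push:
GITHUB_REF=refs/tags/v1.27.0
GITHUB_REPOSITORY_OWNER=filecoin-project
GITHUB_REPOSITORY=filecoin-project/lotus

# The same parameter expansions the action uses:
echo "${GITHUB_REF#refs/tags/}"                        # -> v1.27.0 (CIRCLE_TAG)
echo "${GITHUB_REPOSITORY#$GITHUB_REPOSITORY_OWNER/}"  # -> lotus (CIRCLE_PROJECT_REPONAME)
```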
diff --git a/.github/actions/install-go/action.yml b/.github/actions/install-go/action.yml
new file mode 100644
@@ -0,0 +1,16 @@
+name: Install Go
+description: Install Go for Filecoin Lotus
+
+runs:
+  using: composite
+  steps:
+    - uses: actions/setup-go@v5
+      with:
+        go-version: stable
+        cache: false
+    - id: go-mod
+      uses: ipdxco/unified-github-workflows/.github/actions/read-go-mod@main
+    - uses: actions/setup-go@v5
+      with:
+        go-version: ${{ fromJSON(steps.go-mod.outputs.json).Go }}.x
+        cache: false
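The second `setup-go` pass pins the version declared in go.mod, which the read-go-mod action exposes as JSON. To check that value locally, the Go toolchain can print it directly (a sketch; assumes jq is installed):

```bash
# go.mod rendered as JSON; .Go is the `go` directive the action reads.
go mod edit -json | jq -r '.Go'    # e.g. 1.21
```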
diff --git a/.github/actions/install-system-dependencies/action.yml b/.github/actions/install-system-dependencies/action.yml
new file mode 100644
@@ -0,0 +1,21 @@
+name: Install System Dependencies
+description: Install System dependencies for Filecoin Lotus
+
+runs:
+  using: composite
+  steps:
+    - if: runner.os == 'Linux'
+      run: |
+        # List processes to enable debugging in case /var/lib/apt/lists/ is locked
+        ps aux
+        sudo apt-get update -y
+        sudo apt-get install -y ocl-icd-opencl-dev libhwloc-dev pkg-config
+      shell: bash
+    - if: runner.os == 'macOS'
+      env:
+        HOMEBREW_NO_AUTO_UPDATE: '1'
+      run: |
+        brew install hwloc pkg-config
+        echo "CPATH=$(brew --prefix)/include" | tee -a $GITHUB_ENV
+        echo "LIBRARY_PATH=$(brew --prefix)/lib" | tee -a $GITHUB_ENV
+      shell: bash
diff --git a/.github/actions/start-yugabytedb/action.yml b/.github/actions/start-yugabytedb/action.yml
new file mode 100644
@@ -0,0 +1,16 @@
+name: Start YugabyteDB
+description: Install Yugabyte Database for Filecoin Lotus
+
+runs:
+  using: composite
+  steps:
+    - run: docker run --rm --name yugabyte -d -p 5433:5433 yugabytedb/yugabyte:2.18.0.0-b65 bin/yugabyted start --daemon=false
+      shell: bash
+    - run: |
+        while true; do
+          status=$(docker exec yugabyte bin/yugabyted status);
+          echo $status;
+          echo $status | grep Running && break;
+          sleep 1;
+        done
+      shell: bash
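The wait loop above retries forever, so a container that never starts is only killed by the surrounding step timeout (the Test workflow later applies `timeout-minutes: 3` to this action). A bounded variant of the same readiness check (a sketch; same container name and CLI as above):

```bash
# Give YugabyteDB up to 60 seconds to report Running, then fail explicitly.
for _ in $(seq 1 60); do
  docker exec yugabyte bin/yugabyted status | grep -q Running && exit 0
  sleep 1
done
echo "yugabyte did not become ready" >&2
exit 1
```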
diff --git a/.github/labels.yml b/.github/labels.yml
deleted file mode 100644
@@ -1,248 +0,0 @@
-###
-### Special magic GitHub labels
-### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels
-#
-- name: "good first issue"
-  color: 7057ff
-  description: "Good for newcomers"
-- name: "help wanted"
-  color: 008672
-  description: "Extra attention is needed"
-
-###
-### Goals
-#
-- name: goal/incentives
-  color: ff004d
-  description: "Incentinet"
-
-###
-### Areas
-#
-- name: area/ux
-  color: 00A4E0
-  description: "Area: UX"
-- name: area/chain/vm
-  color: 00A4E2
-  description: "Area: Chain/VM"
-- name: area/chain/sync
-  color: 00A4E4
-  description: "Area: Chain/Sync"
-- name: area/chain/misc
-  color: 00A4E6
-  description: "Area: Chain/Misc"
-- name: area/markets
-  color: 00A4E8
-  description: "Area: Markets"
-- name: area/sealing/fsm
-  color: 0bb1ed
-  description: "Area: Sealing/FSM"
-- name: area/sealing/storage
-  color: 0EB4F0
-  description: "Area: Sealing/Storage"
-- name: area/proving
-  color: 0EB4F0
-  description: "Area: Proving"
-- name: area/mining
-  color: 10B6F2
-  description: "Area: Mining"
-- name: area/client/storage
-  color: 13B9F5
-  description: "Area: Client/Storage"
-- name: area/client/retrieval
-  color: 15BBF7
-  description: "Area: Client/Retrieval"
-- name: area/wallet
-  color: 15BBF7
-  description: "Area: Wallet"
-- name: area/payment-channel
-  color: ff6767
-  description: "Area: Payment Channel"
-- name: area/multisig
-  color: fff0ff
-  description: "Area: Multisig"
-- name: area/networking
-  color: 273f8a
-  description: "Area: Networking"
-
-###
-### Kinds
-#
-- name: kind/bug
-  color: c92712
-  description: "Kind: Bug"
-- name: kind/chore
-  color: fcf0b5
-  description: "Kind: Chore"
-- name: kind/feature
-  color: FFF3B8
-  description: "Kind: Feature"
-- name: kind/improvement
-  color: FFF5BA
-  description: "Kind: Improvement"
-- name: kind/test
-  color: FFF8BD
-  description: "Kind: Test"
-- name: kind/question
-  color: FFFDC2
-  description: "Kind: Question"
-- name: kind/enhancement
-  color: FFFFC5
-  description: "Kind: Enhancement"
-- name: kind/discussion
-  color: FFFFC7
-  description: "Kind: Discussion"
-
-###
-### Difficulties
-#
-- name: dif/trivial
-  color: b2b7ff
-  description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus"
-- name: dif/easy
-  color: 7886d7
-  description: "An existing lotus user should be able to pick this up"
-- name: dif/medium
-  color: 6574cd
-  description: "Prior development experience with lotus is likely helpful"
-- name: dif/hard
-  color: 5661b3
-  description: "Suggests that having worked on the specific component affected by this issue is important"
-- name: dif/expert
-  color: 2f365f
-  description: "Requires extensive knowledge of the history, implications, ramifications of the issue"
-
-###
-### Efforts
-#
-- name: effort/minutes
-  color: e8fffe
-  description: "Effort: Minutes"
-- name: effort/hours
-  color: a0f0ed
-  description: "Effort: Hours"
-- name: effort/day
-  color: 64d5ca
-  description: "Effort: One Day"
-- name: effort/days
-  color: 4dc0b5
-  description: "Effort: Multiple Days"
-- name: effort/week
-  color: 38a89d
-  description: "Effort: One Week"
-- name: effort/weeks
-  color: 20504f
-  description: "Effort: Multiple Weeks"
-
-###
-### Impacts
-#
-- name: impact/regression
-  color: f1f5f8
-  description: "Impact: Regression"
-- name: impact/api-breakage
-  color: ECF0F3
-  description: "Impact: API Breakage"
-- name: impact/quality
-  color: E7EBEE
-  description: "Impact: Quality"
-- name: impact/dx
-  color: E2E6E9
-  description: "Impact: Developer Experience"
-- name: impact/test-flakiness
-  color: DDE1E4
-  description: "Impact: Test Flakiness"
-- name: impact/consensus
-  color: b20014
-  description: "Impact: Consensus"
-
-###
-### Topics
-#
-- name: topic/interoperability
-  color: bf0f73
-  description: "Topic: Interoperability"
-- name: topic/specs
-  color: CC1C80
-  description: "Topic: Specs"
-- name: topic/docs
-  color: D9298D
-  description: "Topic: Documentation"
-- name: topic/architecture
-  color: E53599
-  description: "Topic: Architecture"
-
-###
-### Priorities
-###
-- name: P0
-  color: dd362a
-  description: "P0: Critical Blocker"
-- name: P1
-  color: ce8048
-  description: "P1: Must be resolved"
-- name: P2
-  color: dbd81a
-  description: "P2: Should be resolved"
-- name: P3
-  color: 9fea8f
-  description: "P3: Might get resolved"
-
-###
-### Hints
-#
-#- name: hint/good-first-issue
-#  color: 7057ff
-#  description: "Hint: Good First Issue"
-#- name: hint/help-wanted
-#  color: 008672
-#  description: "Hint: Help Wanted"
-- name: hint/needs-decision
-  color: 33B9A5
-  description: "Hint: Needs Decision"
-- name: hint/needs-triage
-  color: 1AA08C
-  description: "Hint: Needs Triage"
-- name: hint/needs-analysis
-  color: 26AC98
-  description: "Hint: Needs Analysis"
-- name: hint/needs-author-input
-  color: 33B9A5
-  description: "Hint: Needs Author Input"
-- name: hint/needs-team-input
-  color: 40C6B2
-  description: "Hint: Needs Team Input"
-- name: hint/needs-community-input
-  color: 4DD3BF
-  description: "Hint: Needs Community Input"
-- name: hint/needs-review
-  color: 5AE0CC
-  description: "Hint: Needs Review"
-
-###
-### Statuses
-#
-- name: status/done
-  color: edb3a6
-  description: "Status: Done"
-- name: status/deferred
-  color: E0A699
-  description: "Status: Deferred"
-- name: status/in-progress
-  color: D49A8D
-  description: "Status: In Progress"
-- name: status/blocked
-  color: C78D80
-  description: "Status: Blocked"
-- name: status/inactive
-  color: BA8073
-  description: "Status: Inactive"
-- name: status/waiting
-  color: AD7366
-  description: "Status: Waiting"
-- name: status/rotten
-  color: 7A4033
-  description: "Status: Rotten"
-- name: status/discarded
-  color: 6D3326
-  description: "Status: Discarded / Won't fix"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
@@ -0,0 +1,31 @@
+name: Build
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+      - release/*
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+permissions: {}
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - run: make deps lotus
diff --git a/.github/workflows/builtin-actor-tests.yml b/.github/workflows/builtin-actor-tests.yml
new file mode 100644
@@ -0,0 +1,22 @@
+name: Built-in Actors
+
+on:
+  push:
+    paths:
+      - build/actors
+      - build/builtin_actors_gen.go
+    branches:
+      - release/*
+
+permissions: {}
+
+jobs:
+  release:
+    name: Release Tests
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: 1.21
+      - run: go test -tags=release ./build
diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
new file mode 100644
@@ -0,0 +1,82 @@
+name: Check
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+      - release/*
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+permissions: {}
+
+jobs:
+  check-docsgen:
+    name: Check (docs-check)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - run: go install golang.org/x/tools/cmd/goimports
+      - run: make deps
+      - run: make docsgen
+      - run: git diff --exit-code
+  check-gen:
+    name: Check (gen-check)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - run: make deps lotus
+      - run: go install golang.org/x/tools/cmd/goimports
+      - run: go install github.com/hannahhoward/cbor-gen-for
+      - run: make gen
+      - run: git diff --exit-code
+      - run: make docsgen-cli
+      - run: git diff --exit-code
+  check-lint:
+    name: Check (lint-all)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+      - run: make deps
+      - run: golangci-lint run -v --timeout 10m --concurrency 4
+  check-fmt:
+    name: Check (gofmt)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-go
+      - run: go fmt ./...
+      - run: git diff --exit-code
+  check-mod-tidy:
+    name: Check (mod-tidy-check)
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-go
+      - run: go mod tidy -v
+      - run: git diff --exit-code
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
deleted file mode 100644
@@ -1,73 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches:
-      - master
-      - 'release/*'
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches:
-      - master
-      - 'release/*'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'go' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v3
-
-    - uses: actions/setup-go@v3
-      with:
-        go-version: '1.18.8'
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
-      with:
-        languages: go
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
-    # If this step fails, then you should remove it and run the build manually (see below)
-    - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
-
-    # ℹ️ Command-line programs to run using the OS shell.
-    # 📚 https://git.io/JvXDl
-
-    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
-    #    and modify them (or add more) to build your code if your project
-    #    uses a compiled language
-
-    #- run: |
-    #   make bootstrap
-    #   make release
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
new file mode 100644
@@ -0,0 +1,96 @@
+name: Docker
+
+on:
+  push:
+    branches:
+      - master
+      - release/*
+    tags:
+      - v*
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+permissions: {}
+
+jobs:
+  docker:
+    name: Docker (${{ matrix.image }} / ${{ matrix.network }}) [publish=${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }}]
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        image:
+          - lotus-all-in-one
+        network:
+          - mainnet
+          - butterflynet
+          - calibnet
+          - debug
+        include:
+          - image: lotus
+            network: mainnet
+    env:
+      # Do not publish until CircleCI is deprecated
+      PUBLISH: false
+      # PUBLISH: ${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }}
+    steps:
+      - id: channel
+        env:
+          IS_MASTER: ${{ github.ref == 'refs/heads/master' }}
+          IS_TAG: ${{ startsWith(github.ref, 'refs/tags/') }}
+          IS_RC: ${{ endsWith(github.ref, '-rc') }}
+          IS_SCHEDULED: ${{ github.event_name == 'schedule' }}
+        run: |
+          channel=''
+          if [[ "$IS_MASTER" == 'true' ]]; then
+            if [[ "$IS_SCHEDULED" == 'true' ]]; then
+              channel=nightly
+            else
+              channel=master
+            fi
+          elif [[ "$IS_TAG" == 'true' ]]; then
+            if [[ "$IS_RC" == 'true' ]]; then
+              channel=candidate
+            else
+              channel=stable
+            fi
+          fi
+          echo "channel=$channel" | tee -a $GITHUB_ENV
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: filecoin/${{ matrix.image }}
+          tags: |
+            type=schedule
+            type=raw,enable=${{ github.event_name != 'schedule' && steps.channel.outputs.channel != '' }},value=${{ steps.channel.outputs.channel }}
+            type=ref,event=tag
+            type=sha,prefix=
+          flavor: |
+            latest=false
+            suffix=${{ matrix.network != 'mainnet' && format('-{0}', matrix.network) || '' }}
+      - if: env.PUBLISH == 'true'
+        name: Login to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push if channel is set (channel=${{ steps.channel.outputs.channel }})
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          push: ${{ env.PUBLISH == 'true' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          build-args: |
+            ${{ matrix.network != 'mainnet' && format('GOFLAGS=-tags={0}', matrix.network) || ''}}
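The channel step is a four-way decision on ref and event. The same logic as a standalone function, for reading convenience (a sketch; it deliberately mirrors the workflow's literal `endsWith(github.ref, '-rc')` test, so suffixes like `-rc1` would not match there either):

```bash
channel_for() {                        # usage: channel_for <ref> <event_name>
  local ref=$1 event=$2
  if [[ "$ref" == refs/heads/master ]]; then
    [[ "$event" == schedule ]] && echo nightly || echo master
  elif [[ "$ref" == refs/tags/* ]]; then
    [[ "$ref" == *-rc ]] && echo candidate || echo stable
  fi                                   # anything else: empty -> no channel tag
}
channel_for refs/tags/v1.27.0 push     # -> stable
```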
diff --git a/.github/workflows/label-syncer.yml b/.github/workflows/label-syncer.yml
deleted file mode 100644
@@ -1,17 +0,0 @@
----
-name: Label syncer
-on:
-  push:
-    paths:
-      - '.github/labels.yml'
-    branches:
-      - master
-jobs:
-  build:
-    name: Sync labels
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@1.0.0
-      - uses: micnncim/action-label-syncer@v1.0.0
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
@@ -0,0 +1,109 @@
+name: Release
+
+on:
+  push:
+    branches:
+      - ci/*
+      - release/*
+    tags:
+      - v*
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+permissions: {}
+
+jobs:
+  build:
+    name: Build (${{ matrix.os }}/${{ matrix.arch }})
+    runs-on: ${{ matrix.runner }}
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - runner: ubuntu-latest
+            os: Linux
+            arch: X64
+          - runner: macos-13
+            os: macOS
+            arch: X64
+          - runner: macos-14
+            os: macOS
+            arch: ARM64
+    steps:
+      - env:
+          OS: ${{ matrix.os }}
+          ARCH: ${{ matrix.arch }}
+        run: |
+          if [[ "$OS" != "$RUNNER_OS" || "$ARCH" != "$RUNNER_ARCH" ]]; then
+            echo "::error title=Unexpected Runner::Expected $OS/$ARCH, got $RUNNER_OS/$RUNNER_ARCH"
+            exit 1
+          fi
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/export-circle-env
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - env:
+          GITHUB_TOKEN: ${{ github.token }}
+        run: make deps lotus lotus-miner lotus-worker
+      - if: runner.os == 'macOS'
+        run: otool -hv lotus
+      - run: ./scripts/version-check.sh ./lotus
+      - uses: actions/upload-artifact@v4
+        with:
+          name: lotus-${{ matrix.os }}-${{ matrix.arch }}
+          path: |
+            lotus
+            lotus-miner
+            lotus-worker
+  release:
+    name: Release [publish=${{ startsWith(github.ref, 'refs/tags/') }}]
+    permissions:
+      # This enables the job to create and/or update GitHub releases
+      contents: write
+    runs-on: ubuntu-latest
+    needs: [build]
+    env:
+      # Do not publish until CircleCI is deprecated
+      PUBLISH: false
+      # PUBLISH: ${{ startsWith(github.ref, 'refs/tags/') }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+          fetch-depth: 0
+      - uses: actions/download-artifact@v4
+        with:
+          name: lotus-Linux-X64
+          path: linux_amd64_v1
+      - uses: actions/download-artifact@v4
+        with:
+          name: lotus-macOS-X64
+          path: darwin_amd64_v1
+      - uses: actions/download-artifact@v4
+        with:
+          name: lotus-macOS-ARM64
+          path: darwin_arm64
+      - uses: ./.github/actions/export-circle-env
+      - uses: ./.github/actions/install-go
+      - uses: ipfs/download-ipfs-distribution-action@v1
+        with:
+          name: kubo
+          version: v0.16.0
+      - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
+        with:
+          distribution: goreleaser-pro
+          version: latest
+          args: release --clean --debug ${{ env.PUBLISH == 'false' && '--snapshot' || '' }}
+        env:
+          GITHUB_TOKEN: ${{ env.PUBLISH == 'true' && github.token || '' }}
+          GORELEASER_KEY: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_KEY || '' }}
+      - run: ./scripts/generate-checksums.sh
+      - if: env.PUBLISH == 'true'
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+        run: ./scripts/publish-checksums.sh
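The `path:` values on the download-artifact steps are not arbitrary: they recreate the `<os>_<arch>` directory layout that the GoReleaser prebuilt builder resolves from its path template (see the .goreleaser.yaml hunks below). Roughly, after the three downloads (a sketch):

```bash
# Expected layout consumed by the prebuilt builder:
#   linux_amd64_v1/{lotus,lotus-miner,lotus-worker}
#   darwin_amd64_v1/{lotus,lotus-miner,lotus-worker}
#   darwin_arm64/{lotus,lotus-miner,lotus-worker}
ls linux_amd64_v1 darwin_amd64_v1 darwin_arm64
```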
diff --git a/.github/workflows/sorted-pr-checks.yml b/.github/workflows/sorted-pr-checks.yml
new file mode 100644
@@ -0,0 +1,32 @@
+name: Comment with sorted PR checks
+
+on:
+  workflow_dispatch:
+    inputs:
+      pull_number:
+        description: 'Pull request number'
+        required: true
+  workflow_run:
+    workflows:
+      - Build
+      - Check
+      - CodeQL
+      - Test
+    types:
+      - requested
+      - completed
+
+permissions:
+  pull-requests: write
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number || 'unknown' }}
+  cancel-in-progress: true
+
+jobs:
+  comment:
+    if: github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0]
+    uses: ipdxco/sorted-pr-checks/.github/workflows/comment.yml@v1
+    with:
+      pull_number: ${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }}
+      template: unsuccessful_only
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
@@ -4,18 +4,18 @@ on:
   schedule:
   - cron: '0 12 * * *'

+permissions: {}
+
 jobs:
   stale:
-
-    runs-on: ubuntu-latest
     permissions:
       issues: write
       pull-requests: write
-
+    runs-on: ubuntu-latest
     steps:
-    - uses: actions/stale@v3
+    - uses: actions/stale@v9
       with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
+        repo-token: ${{ github.token }}
         stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
         close-issue-message: 'This issue was closed because it is missing author input.'
         stale-pr-message: 'Thank you for submitting the PR and contributing to lotus! Lotus maintainers need more of your input before merging it, please address the suggested changes or reply to the comments or this PR will be closed in 48 hours. You are always more than welcome to reopen the PR later as well!'
@@ -29,5 +29,3 @@ jobs:
         days-before-pr-close: 2
         remove-stale-when-updated: true
         enable-statistics: true
-
-
diff --git a/.github/workflows/sync-master-main.yaml b/.github/workflows/sync-master-main.yaml
@@ -1,13 +1,19 @@
 name: sync-master-main
+
 on:
   push:
     branches:
      - master
+
+permissions: {}
+
 jobs:
   sync:
+    permissions:
+      contents: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: update remote branch main
         run: |
           # overrides the remote branch (origin:github) `main`
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
@@ -0,0 +1,320 @@
+name: Test
+
+on:
+  pull_request:
+  push:
+    branches:
+      - master
+      - release/*
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
+permissions: {}
+
+jobs:
+  discover:
+    name: Discover Test Groups
+    runs-on: ubuntu-latest
+    outputs:
+      groups: ${{ steps.test.outputs.groups }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - id: test
+        env:
+          # Unit test groups other than unit-rest
+          utests: |
+            [
+              {"name": "unit-cli", "packages": ["./cli/...", "./cmd/...", "./api/..."]},
+              {"name": "unit-storage", "packages": ["./storage/...", "./extern/..."]},
+              {"name": "unit-node", "packages": ["./node/..."]}
+            ]
+          # Other tests that require special configuration
+          otests: |
+            [
+              {
+                "name": "multicore-sdr",
+                "packages": ["./storage/sealer/ffiwrapper"],
+                "go_test_flags": "-run=TestMulticoreSDR",
+                "test_rustproofs_logs": "1"
+              }, {
+                "name": "conformance",
+                "packages": ["./conformance"],
+                "go_test_flags": "-run=TestConformance",
+                "skip_conformance": "0"
+              }
+            ]
+          # Mapping from test group names to custom runner labels
+          # The jobs default to running on the default hosted runners (4 CPU, 16 RAM).
+          # We use self-hosted xlarge (4 CPU, 8 RAM; and large - 2 CPU, 4 RAM) runners
+          # to extend the available runner capacity (60 default hosted runners).
+          # We use self-hosted 4xlarge (16 CPU, 32 RAM; and 2xlarge - 8 CPU, 16 RAM) self-hosted
+          # to support resource intensive jobs.
+          # In CircleCI, the jobs defaulted to running on medium+ runners (3 CPU, 6 RAM).
+          # The following jobs were scheduled to run on 2xlarge runners (16 CPU, 32 RAM):
+          # - itest-deals_concurrent (✅)
+          # - itest-sector_pledge (✅)
+          # - itest-wdpost_worker_config (❌)
+          # - itest-worker (✅)
+          # - unit-cli (❌)
+          # - unit-rest (❌)
+          runners: |
+            {
+              "itest-deals_concurrent": ["self-hosted", "linux", "x64", "4xlarge"],
+              "itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"],
+              "itest-worker": ["self-hosted", "linux", "x64", "4xlarge"],
+
+              "itest-gateway": ["self-hosted", "linux", "x64", "2xlarge"],
+              "itest-sector_import_full": ["self-hosted", "linux", "x64", "2xlarge"],
+              "itest-sector_import_simple": ["self-hosted", "linux", "x64", "2xlarge"],
+              "itest-wdpost": ["self-hosted", "linux", "x64", "2xlarge"],
+              "unit-storage": ["self-hosted", "linux", "x64", "2xlarge"],
+
+              "itest-batch_deal": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-cli": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_512mb": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_anycid": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_invalid_utf8_label": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_max_staging_deals": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_partial_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_publish": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-deals_remote_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-decode_params": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-dup_mpool_messages": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_account_abstraction": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_api": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_balance": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_bytecode": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_config": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_conformance": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_deploy": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_fee_history": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-eth_transactions": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-fevm_address": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-fevm_events": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-gas_estimation": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-msgindex": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-multisig": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-net": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-nonce": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-path_detach_redeclare": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-pending_deal_allocation": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-remove_verifreg_datacap": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-sector_miner_collateral": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-sector_numassign": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-self_sent_txn": ["self-hosted", "linux", "x64", "xlarge"],
+              "itest-verifreg": ["self-hosted", "linux", "x64", "xlarge"],
+              "multicore-sdr": ["self-hosted", "linux", "x64", "xlarge"],
+              "unit-node": ["self-hosted", "linux", "x64", "xlarge"]
+            }
+          # A list of test groups that require YugabyteDB to be running
+          # In CircleCI, all jobs had yugabytedb running as a sidecar.
+          yugabytedb: |
+            ["itest-harmonydb", "itest-harmonytask", "itest-curio"]
+          # A list of test groups that require Proof Parameters to be fetched
+          # In CircleCI, only the following jobs had get-params set:
+          # - unit-cli (✅)
+          # - unit-storage (✅)
+          # - itest-sector_pledge (✅)
+          # - itest-wdpost (✅)
+          parameters: |
+            [
+              "conformance",
+              "itest-api",
+              "itest-deals_offline",
+              "itest-deals_padding",
+              "itest-deals_partial_retrieval_dm-level",
+              "itest-deals_pricing",
+              "itest-deals",
+              "itest-direct_data_onboard_verified",
+              "itest-direct_data_onboard",
+              "itest-net",
+              "itest-path_detach_redeclare",
+              "itest-path_type_filters",
+              "itest-sealing_resources",
+              "itest-sector_finalize_early",
+              "itest-sector_import_full",
+              "itest-sector_import_simple",
+              "itest-sector_pledge",
+              "itest-sector_unseal",
+              "itest-wdpost_no_miner_storage",
+              "itest-wdpost_worker_config",
+              "itest-wdpost",
+              "itest-worker_upgrade",
+              "itest-worker",
+              "multicore-sdr",
+              "unit-cli",
+              "unit-storage"
+            ]
+        run: |
+          # Create a list of integration test groups
+          itests="$(
+            find ./itests -name "*_test.go" | \
+              jq -R '{
+                "name": "itest-\(. | split("/") | .[2] | sub("_test.go$";""))",
+                "packages": [.]
+              }' | jq -s
+          )"
+
+          # Create a list of packages that are covered by the integration and unit tests
+          packages="$(jq -n --argjson utests "$utests" '$utests | map(.packages) | flatten | . + ["./itests/..."]')"
+
+          # Create a new group for the unit tests that are not yet covered
+          rest="$(
+            find . -name "*_test.go" | cut -d/ -f2 | sort | uniq | \
+              jq -R '"./\(.)/..."' | \
+              jq -s --argjson p "$packages" '{"name": "unit-rest", "packages": (. - $p)}'
+          )"
+
+          # Combine the groups for integration tests, unit tests, the new unit-rest group, and the other tests
+          groups="$(jq -n --argjson i "$itests" --argjson u "$utests" --argjson r "$rest" --argjson o "$otests" '$i + $u + [$r] + $o')"
+
+          # Apply custom runner labels to the groups
+          groups="$(jq -n --argjson g "$groups" --argjson r "$runners" '$g | map(. + {"runner": (.name as $n | $r | .[$n]) })')"
+
+          # Apply the needs_yugabytedb flag to the groups
+          groups="$(jq -n --argjson g "$groups" --argjson y "$yugabytedb" '$g | map(. + {"needs_yugabytedb": ([.name] | inside($y)) })')"
+
+          # Apply the needs_parameters flag to the groups
+          groups="$(jq -n --argjson g "$groups" --argjson p "$parameters" '$g | map(. + {"needs_parameters": ([.name] | inside($p)) })')"
+
+          # Output the groups
+          echo "groups=$groups"
+          echo "groups=$(jq -nc --argjson g "$groups" '$g')" >> $GITHUB_OUTPUT
+  cache:
+    name: Cache Dependencies
+    runs-on: ubuntu-latest
+    outputs:
+      fetch_params_key: ${{ steps.fetch_params.outputs.key }}
+      fetch_params_path: ${{ steps.fetch_params.outputs.path }}
+      make_deps_key: ${{ steps.make_deps.outputs.key }}
+      make_deps_path: ${{ steps.make_deps.outputs.path }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - id: fetch_params
+        env:
+          CACHE_KEY: fetch-params-${{ hashFiles('./extern/filecoin-ffi/parameters.json') }}
+          CACHE_PATH: |
+            /var/tmp/filecoin-proof-parameters/
+        run: |
+          echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
+          echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
+      - id: make_deps
+        env:
+          CACHE_KEY: ${{ runner.os }}-${{ runner.arch }}-make-deps-${{ hashFiles('./extern/filecoin-ffi/install-filcrypto') }}-${{ hashFiles('./extern/filecoin-ffi/rust/rustc-target-features-optimized.json') }}
+          CACHE_PATH: |
+            ./extern/filecoin-ffi/filcrypto.h
+            ./extern/filecoin-ffi/libfilcrypto.a
+            ./extern/filecoin-ffi/filcrypto.pc
+        run: |
+          echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
+          echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
+      - id: restore_fetch_params
+        uses: actions/cache/restore@v4
+        with:
+          key: ${{ steps.fetch_params.outputs.key }}
+          path: ${{ steps.fetch_params.outputs.path }}
+          lookup-only: true
+      - id: restore_make_deps
+        uses: actions/cache/restore@v4
+        with:
+          key: ${{ steps.make_deps.outputs.key }}
+          path: ${{ steps.make_deps.outputs.path }}
+          lookup-only: true
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
+        uses: ./.github/actions/install-system-dependencies
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
+        uses: ./.github/actions/install-go
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true' || steps.restore_make_deps.outputs.cache-hit != 'true'
+        env:
+          GITHUB_TOKEN: ${{ github.token }}
+        run: make deps
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
+        run: make lotus
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
+        run: ./lotus fetch-params 2048
+      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
+        uses: actions/cache/save@v4
+        with:
+          key: ${{ steps.fetch_params.outputs.key }}
+          path: ${{ steps.fetch_params.outputs.path }}
+      - if: steps.restore_make_deps.outputs.cache-hit != 'true'
+        uses: actions/cache/save@v4
+        with:
+          key: ${{ steps.make_deps.outputs.key }}
+          path: ${{ steps.make_deps.outputs.path }}
+  test:
+    needs: [discover, cache]
+    name: Test (${{ matrix.name }})
+    runs-on: ${{ github.repository == 'filecoin-project/lotus' && matrix.runner || 'ubuntu-latest' }}
+    strategy:
+      fail-fast: false
+      matrix:
+        include: ${{ fromJson(needs.discover.outputs.groups) }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: 'recursive'
+      - uses: ./.github/actions/install-system-dependencies
+      - uses: ./.github/actions/install-go
+      - run: go install gotest.tools/gotestsum@latest
+      - name: Restore cached make deps outputs
+        uses: actions/cache/restore@v4
+        with:
+          key: ${{ needs.cache.outputs.make_deps_key }}
+          path: ${{ needs.cache.outputs.make_deps_path }}
+          fail-on-cache-miss: true
+      - if: ${{ matrix.needs_parameters }}
+        name: Restore cached fetch params outputs
+        uses: actions/cache/restore@v4
+        with:
+          key: ${{ needs.cache.outputs.fetch_params_key }}
+          path: ${{ needs.cache.outputs.fetch_params_path }}
+          fail-on-cache-miss: true
+      - if: ${{ matrix.needs_yugabytedb }}
+        uses: ./.github/actions/start-yugabytedb
+        timeout-minutes: 3
+      # TODO: Install statediff (used to be used for conformance)
+      - id: reports
+        run: mktemp -d | xargs -0 -I{} echo "path={}" | tee -a $GITHUB_OUTPUT
+      # TODO: Track coverage (used to be tracked for conformance)
+      - env:
+          NAME: ${{ matrix.name }}
+          LOTUS_SRC_DIR: ${{ github.workspace }}
+          LOTUS_HARMONYDB_HOSTS: 127.0.0.1
+          REPORTS_PATH: ${{ steps.reports.outputs.path }}
+          SKIP_CONFORMANCE: ${{ matrix.skip_conformance || '1' }}
+          TEST_RUSTPROOFS_LOGS: ${{ matrix.test_rustproofs_logs || '0' }}
+          FORMAT: ${{ matrix.format || 'standard-verbose' }}
+          PACKAGES: ${{ join(matrix.packages, ' ') }}
+        run: |
+          gotestsum \
+            --format "$FORMAT" \
+            --junitfile "$REPORTS_PATH/$NAME.xml" \
+            --jsonfile "$REPORTS_PATH/$NAME.json" \
+            --packages="$PACKAGES" \
+            -- ${{ matrix.go_test_flags || '' }}
+      - if: success() || failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ matrix.name }}
+          path: |
+            ${{ steps.reports.outputs.path }}/${{ matrix.name }}.xml
+            ${{ steps.reports.outputs.path }}/${{ matrix.name }}.json
+        continue-on-error: true
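The discovery script is easiest to understand by running its first stage locally; it is the same jq pipeline as in the step above, and each emitted object becomes one matrix row once the runner/yugabytedb/parameters annotations are merged in (the annotated example below is illustrative):

```bash
# Stage 1: one group per integration test file (same jq pipeline as the workflow).
find ./itests -name "*_test.go" | \
  jq -R '{
    "name": "itest-\(. | split("/") | .[2] | sub("_test.go$";""))",
    "packages": [.]
  }' | jq -s
# After annotation, a matrix row looks roughly like:
# {"name": "itest-wdpost", "packages": ["./itests/wdpost_test.go"],
#  "runner": ["self-hosted", "linux", "x64", "2xlarge"],
#  "needs_yugabytedb": false, "needs_parameters": true}
```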
diff --git a/.github/workflows/testground-on-push.yml b/.github/workflows/testground-on-push.yml
deleted file mode 100644
@@ -1,29 +0,0 @@
----
-name: Testground PR Checker
-
-on: [push]
-
-jobs:
-  testground:
-    runs-on: ubuntu-latest
-    name: ${{ matrix.composition_file }}
-    strategy:
-      matrix:
-        include:
-          - backend_addr: ci.testground.ipfs.team
-            backend_proto: https
-            plan_directory: testplans/lotus-soup
-            composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
-          - backend_addr: ci.testground.ipfs.team
-            backend_proto: https
-            plan_directory: testplans/lotus-soup
-            composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
-    steps:
-      - uses: actions/checkout@v2
-      - name: testground run
-        uses: testground/testground-github-action@v1
-        with:
-          backend_addr: ${{ matrix.backend_addr }}
-          backend_proto: ${{ matrix.backend_proto }}
-          plan_directory: ${{ matrix.plan_directory }}
-          composition_file: ${{ matrix.composition_file }}
diff --git a/.gitignore b/.gitignore
@@ -6,7 +6,8 @@
 /lotus-chainwatch
 /lotus-shed
 /lotus-sim
-/lotus-provider
+/curio
+/sptool
 /lotus-townhall
 /lotus-fountain
 /lotus-stats
@@ -36,6 +37,9 @@ build/paramfetch.sh
 /darwin
 /linux
 *.snap
+devgen.car
+localnet.json
+/*.ndjson

 *-fuzz.zip
 /chain/types/work_msg/
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
@@ -27,7 +27,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus
+      path: '{{ if index .Env "GITHUB_WORKSPACE" }}{{ .Env.GITHUB_WORKSPACE }}{{ else }}/tmp/workspace{{ end }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus'
   - id: lotus-miner
     binary: lotus-miner
     builder: prebuilt
@@ -43,7 +43,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner
+      path: '{{ if index .Env "GITHUB_WORKSPACE" }}{{ .Env.GITHUB_WORKSPACE }}{{ else }}/tmp/workspace{{ end }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner'
   - id: lotus-worker
     binary: lotus-worker
     builder: prebuilt
@@ -59,7 +59,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker
+      path: '{{ if index .Env "GITHUB_WORKSPACE" }}{{ .Env.GITHUB_WORKSPACE }}{{ else }}/tmp/workspace{{ end }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker'

 archives:
   - id: primary
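The template change makes the prebuilt path work under both CI systems: when `GITHUB_WORKSPACE` is present in the environment it becomes the root, otherwise the old CircleCI `/tmp/workspace` root applies. In shell terms (a sketch of the template's behaviour, not GoReleaser syntax):

```bash
# Equivalent of: {{ if index .Env "GITHUB_WORKSPACE" }}...{{ else }}/tmp/workspace{{ end }}
root=${GITHUB_WORKSPACE:-/tmp/workspace}
echo "$root/linux_amd64_v1/lotus"
```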
@ -80,6 +80,7 @@ The Filecoin network version 22 delivers the following FIPs:
|
|||||||
lotus state actor-cids --network-version=22
|
lotus state actor-cids --network-version=22
|
||||||
Network Version: 22
|
Network Version: 22
|
||||||
Actor Version: 13
|
Actor Version: 13
|
||||||
|
|
||||||
Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e
|
Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e
|
||||||
|
|
||||||
Actor CID
|
Actor CID
|
||||||
@ -3757,7 +3758,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd
|
|||||||
- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
|
- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
|
||||||
- Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses).
|
- Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses).
|
||||||
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
|
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
|
||||||
- You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated`
|
- You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated`
|
||||||
|
|
||||||
## New Features
|
## New Features
|
||||||
- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))
|
- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))
|
||||||
@ -5204,7 +5205,7 @@ This consensus-breaking release of Lotus upgrades the actors version to v2.0.0.
|
|||||||
|
|
||||||
#### Mining
|
#### Mining
|
||||||
|
|
||||||
- Increased ExpectedSealDuration and and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743)
|
- Increased ExpectedSealDuration and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743)
|
||||||
- Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133)
|
- Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133)
|
||||||
- lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961)
|
- lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961)
|
||||||
- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173)
|
- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173)
|
||||||
Dockerfile (10 changes)
@@ -73,7 +73,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
 COPY scripts/docker-lotus-entrypoint.sh /

-ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest
+ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/
 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
 ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
 ENV LOTUS_PATH /var/lib/lotus
@@ -109,7 +109,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-provider /usr/local/bin/
+COPY --from=lotus-builder /opt/filecoin/curio /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
 COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/

@@ -118,13 +118,13 @@ RUN mkdir /var/lib/lotus
 RUN mkdir /var/lib/lotus-miner
 RUN mkdir /var/lib/lotus-worker
 RUN mkdir /var/lib/lotus-wallet
-RUN mkdir /var/lib/lotus-provider
+RUN mkdir /var/lib/curio
 RUN chown fc: /var/tmp/filecoin-proof-parameters
 RUN chown fc: /var/lib/lotus
 RUN chown fc: /var/lib/lotus-miner
 RUN chown fc: /var/lib/lotus-worker
 RUN chown fc: /var/lib/lotus-wallet
-RUN chown fc: /var/lib/lotus-provider
+RUN chown fc: /var/lib/curio


 VOLUME /var/tmp/filecoin-proof-parameters
@@ -132,7 +132,7 @@ VOLUME /var/lib/lotus
 VOLUME /var/lib/lotus-miner
 VOLUME /var/lib/lotus-worker
 VOLUME /var/lib/lotus-wallet
-VOLUME /var/lib/lotus-provider
+VOLUME /var/lib/curio

 EXPOSE 1234
 EXPOSE 2345
Makefile (92 changes)
@@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
 deps: $(BUILD_DEPS)
 .PHONY: deps

-build-devnets: build lotus-seed lotus-shed lotus-provider
+build-devnets: build lotus-seed lotus-shed curio sptool
 .PHONY: build-devnets

 debug: GOFLAGS+=-tags=debug
@@ -97,14 +97,20 @@ lotus-miner: $(BUILD_DEPS)
 .PHONY: lotus-miner
 BINS+=lotus-miner

-lotus-provider: $(BUILD_DEPS)
-    rm -f lotus-provider
-    $(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider
-.PHONY: lotus-provider
-BINS+=lotus-provider
+curio: $(BUILD_DEPS)
+    rm -f curio
+    $(GOCC) build $(GOFLAGS) -o curio ./cmd/curio
+.PHONY: curio
+BINS+=curio

-lp2k: GOFLAGS+=-tags=2k
-lp2k: lotus-provider
+cu2k: GOFLAGS+=-tags=2k
+cu2k: curio

+sptool: $(BUILD_DEPS)
+    rm -f sptool
+    $(GOCC) build $(GOFLAGS) -o sptool ./cmd/sptool
+.PHONY: sptool
+BINS+=sptool
+
 lotus-worker: $(BUILD_DEPS)
     rm -f lotus-worker
@@ -124,13 +130,13 @@ lotus-gateway: $(BUILD_DEPS)
 .PHONY: lotus-gateway
 BINS+=lotus-gateway

-build: lotus lotus-miner lotus-worker lotus-provider
+build: lotus lotus-miner lotus-worker curio sptool
     @[[ $$(type -P "lotus") ]] && echo "Caution: you have \
 an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true

 .PHONY: build

-install: install-daemon install-miner install-worker install-provider
+install: install-daemon install-miner install-worker install-curio install-sptool

 install-daemon:
     install -C ./lotus /usr/local/bin/lotus
@@ -138,8 +144,11 @@ install-daemon:
 install-miner:
     install -C ./lotus-miner /usr/local/bin/lotus-miner

-install-provider:
-    install -C ./lotus-provider /usr/local/bin/lotus-provider
+install-curio:
+    install -C ./curio /usr/local/bin/curio
+
+install-sptool:
+    install -C ./sptool /usr/local/bin/sptool

 install-worker:
     install -C ./lotus-worker /usr/local/bin/lotus-worker
@@ -156,8 +165,11 @@ uninstall-daemon:
 uninstall-miner:
     rm -f /usr/local/bin/lotus-miner

-uninstall-provider:
-    rm -f /usr/local/bin/lotus-provider
+uninstall-curio:
+    rm -f /usr/local/bin/curio
+
+uninstall-sptool:
+    rm -f /usr/local/bin/sptool

 uninstall-worker:
     rm -f /usr/local/bin/lotus-worker
@@ -246,7 +258,9 @@ install-daemon-service: install-daemon
     install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service
     systemctl daemon-reload
     @echo
-    @echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup."
+    @echo "lotus-daemon service installed."
+    @echo "To start the service, run: 'sudo systemctl start lotus-daemon'"
+    @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-daemon'"

 install-miner-service: install-miner install-daemon-service
     mkdir -p /etc/systemd/system
@@ -254,15 +268,17 @@ install-miner-service: install-miner install-daemon-service
     install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service
     systemctl daemon-reload
     @echo
-    @echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
+    @echo "lotus-miner service installed."
+    @echo "To start the service, run: 'sudo systemctl start lotus-miner'"
+    @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'"

-install-provider-service: install-provider install-daemon-service
+install-curio-service: install-curio install-sptool install-daemon-service
     mkdir -p /etc/systemd/system
     mkdir -p /var/log/lotus
-    install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service
+    install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service
     systemctl daemon-reload
     @echo
-    @echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup."
+    @echo "Curio service installed. Don't forget to run 'sudo systemctl start curio' to start it and 'sudo systemctl enable curio' for it to be enabled on startup."

 install-main-services: install-miner-service

@@ -282,10 +298,10 @@ clean-miner-service:
     rm -f /etc/systemd/system/lotus-miner.service
     systemctl daemon-reload

-clean-provider-service:
-    -systemctl stop lotus-provider
-    -systemctl disable lotus-provider
-    rm -f /etc/systemd/system/lotus-provider.service
+clean-curio-service:
+    -systemctl stop curio
+    -systemctl disable curio
+    rm -f /etc/systemd/system/curio.service
     systemctl daemon-reload

 clean-main-services: clean-daemon-service
@@ -303,6 +319,10 @@ install-completions:
     install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
     install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus

+unittests:
+    @$(GOCC) test $(shell go list ./... | grep -v /lotus/itests)
+.PHONY: unittests
+
 clean:
     rm -rf $(CLEAN) $(BINS)
     -$(MAKE) -C $(FFI_PATH) clean
@@ -324,7 +344,7 @@ actors-code-gen:
     $(GOCC) fmt ./...

 actors-gen: actors-code-gen
-    ./scripts/fiximports
+    $(GOCC) run ./scripts/fiximports
 .PHONY: actors-gen

 bundle-gen:
@@ -358,7 +378,7 @@ docsgen-md-bin: api-gen actors-gen
 docsgen-openrpc-bin: api-gen actors-gen
     $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd

-docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider
+docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-curio

 docsgen-md-full: docsgen-md-bin
     ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@@ -367,42 +387,42 @@ docsgen-md-storage: docsgen-md-bin
     ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
 docsgen-md-worker: docsgen-md-bin
     ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
-docsgen-md-provider: docsgen-md-bin
-    ./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md
+docsgen-md-curio: docsgen-md-bin
+    ./docgen-md "api/api_curio.go" "Curio" "api" "./api" > documentation/en/api-v0-methods-curio.md

 docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway

 docsgen-openrpc-full: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
+    ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" > build/openrpc/full.json
 docsgen-openrpc-storage: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
+    ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" > build/openrpc/miner.json
 docsgen-openrpc-worker: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
+    ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" > build/openrpc/worker.json
 docsgen-openrpc-gateway: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" -gzip > build/openrpc/gateway.json.gz
+    ./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" > build/openrpc/gateway.json

 .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin

 fiximports:
-    ./scripts/fiximports
+    $(GOCC) run ./scripts/fiximports

 gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci
-    ./scripts/fiximports
+    $(GOCC) run ./scripts/fiximports
     @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
 .PHONY: gen

 jen: gen

-snap: lotus lotus-miner lotus-worker lotus-provider
+snap: lotus lotus-miner lotus-worker curio sptool
     snapcraft
     # snapcraft upload ./lotus_*.snap

 # separate from gen because it needs binaries
-docsgen-cli: lotus lotus-miner lotus-worker lotus-provider
+docsgen-cli: lotus lotus-miner lotus-worker curio sptool
     python3 ./scripts/generate-lotus-cli.py
     ./lotus config default > documentation/en/default-lotus-config.toml
     ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
-    ./lotus-provider config default > documentation/en/default-lotus-provider-config.toml
+    ./curio config default > documentation/en/default-curio-config.toml
 .PHONY: docsgen-cli

 print-%:
api/api_curio.go (new file, 34 lines)
@@ -0,0 +1,34 @@
+package api
+
+import (
+	"context"
+	"net/http"
+	"net/url"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/storage/sealer/fsutil"
+	"github.com/filecoin-project/lotus/storage/sealer/storiface"
+)
+
+type Curio interface {
+	Version(context.Context) (Version, error) //perm:admin
+
+	AllocatePieceToSector(ctx context.Context, maddr address.Address, piece PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write
+
+	StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error //perm:admin
+	StorageAddLocal(ctx context.Context, path string) error //perm:admin
+	StorageDetachLocal(ctx context.Context, path string) error //perm:admin
+	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
+	StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
+	StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
+	StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
+	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
+
+	LogList(ctx context.Context) ([]string, error) //perm:read
+	LogSetLevel(ctx context.Context, subsystem, level string) error //perm:admin
+
+	// Trigger shutdown
+	Shutdown(context.Context) error //perm:admin
+}
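A hypothetical call against the new interface, asking Curio to ingest piece data from an HTTP source and place it into a sector. The miner address, source URL, raw size, and the zero-valued PieceDealInfo below are all placeholders, not values a real deal flow would use:

```go
package example

import (
	"context"
	"net/http"
	"net/url"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// allocate sketches AllocatePieceToSector usage; c is any api.Curio client.
func allocate(ctx context.Context, c api.Curio) (api.SectorOffset, error) {
	maddr, err := address.NewFromString("t01000") // placeholder miner
	if err != nil {
		return api.SectorOffset{}, err
	}
	src, err := url.Parse("http://example.com/piece.car") // placeholder source
	if err != nil {
		return api.SectorOffset{}, err
	}
	// PieceDealInfo{} is a stand-in; a real call carries actual deal metadata.
	return c.AllocatePieceToSector(ctx, maddr, api.PieceDealInfo{}, 2048, *src, http.Header{})
}
```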
@@ -335,7 +335,7 @@ type FullNode interface {
 	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
 	// WalletDefaultAddress returns the address marked as default in the wallet.
 	WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
-	// WalletSetDefault marks the given address as as the default one.
+	// WalletSetDefault marks the given address as the default one.
 	WalletSetDefault(context.Context, address.Address) error //perm:write
 	// WalletExport returns the private key of an address in the wallet.
 	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@@ -904,9 +904,6 @@ type FullNode interface {
 	// the path specified when calling CreateBackup is within the base path
 	CreateBackup(ctx context.Context, fpath string) error //perm:admin

-	RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
-	RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
-
 	// Actor events

 	// GetActorEventsRaw returns all user-programmed and built-in actor events that match the given
@@ -77,6 +77,7 @@ type Gateway interface {
 	StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
 	StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*MarketDeal, error)
 	StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
+	StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
 	StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
 	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
 	StateNetworkName(context.Context) (dtypes.NetworkName, error)
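A minimal sketch of exercising the newly exposed gateway method; gw is any api.Gateway client, and types.EmptyTSK conventionally means "evaluate at the current head":

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// countDeadlines fetches a miner's proving deadlines through the gateway
// and returns how many there are.
func countDeadlines(ctx context.Context, gw api.Gateway, miner string) (int, error) {
	maddr, err := address.NewFromString(miner)
	if err != nil {
		return 0, err
	}
	dls, err := gw.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return 0, err
	}
	return len(dls), nil
}
```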
api/api_lp.go (deleted file)
@@ -1,10 +0,0 @@
-package api
-
-import "context"
-
-type LotusProvider interface {
-	Version(context.Context) (Version, error) //perm:admin
-
-	// Trigger shutdown
-	Shutdown(context.Context) error //perm:admin
-}
@@ -200,7 +200,7 @@ type StorageMiner interface {
 	// StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference.
 	// Paths with more weight and more % of free space are preferred.
 	// Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
-	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType, miner abi.ActorID) ([]storiface.StorageInfo, error) //perm:admin
 	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
 	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
 	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
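The signature change threads the owning miner's actor ID into path selection. A sketch of an updated call site, where sm is any api.StorageMiner client and the concrete values are placeholders:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// pickSealScratch shows the extra abi.ActorID argument that callers must
// now pass so path selection can account for the specific miner.
func pickSealScratch(ctx context.Context, sm api.StorageMiner) ([]storiface.StorageInfo, error) {
	return sm.StorageBestAlloc(
		ctx,
		storiface.FTSealed,          // file type to allocate
		abi.SectorSize(34359738368), // 32 GiB sectors
		storiface.PathSealing,       // sealing (not long-term storage) paths
		abi.ActorID(1000),           // placeholder miner actor ID (t01000)
	)
}
```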
@@ -15,9 +15,9 @@ import (
 	"github.com/filecoin-project/lotus/lib/rpcenc"
 )

-// NewProviderRpc creates a new http jsonrpc client.
-func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) {
-	var res v1api.LotusProviderStruct
+// NewCurioRpc creates a new http jsonrpc client.
+func NewCurioRpc(ctx context.Context, addr string, requestHeader http.Header) (api.Curio, jsonrpc.ClientCloser, error) {
+	var res v1api.CurioStruct

 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
 		api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
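Taken together with the api.Curio interface above, connecting to a Curio node looks roughly like this. A minimal sketch: the client package import path and the listen address are assumptions, and a real deployment would also put an auth token on the request header:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	// Assumed import path for the client package shown in this hunk.
	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Hypothetical local Curio RPC endpoint.
	curio, closer, err := client.NewCurioRpc(ctx, "ws://127.0.0.1:12300/rpc/v0", http.Header{})
	if err != nil {
		panic(err)
	}
	defer closer()

	v, err := curio.Version(ctx) // perm:admin per the interface annotations
	if err != nil {
		panic(err)
	}
	fmt.Println("curio version:", v)
}
```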
@@ -146,6 +146,7 @@ func init() {
 	allocationId := verifreg.AllocationId(0)
 	addExample(allocationId)
 	addExample(&allocationId)
+	addExample(miner.SectorOnChainInfoFlags(0))
 	addExample(map[verifreg.AllocationId]verifreg.Allocation{})
 	claimId := verifreg.ClaimId(0)
 	addExample(claimId)
@ -356,10 +357,6 @@ func init() {
|
|||||||
addExample(map[string]bitfield.BitField{
|
addExample(map[string]bitfield.BitField{
|
||||||
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
||||||
})
|
})
|
||||||
addExample(&api.RaftStateData{
|
|
||||||
NonceMap: make(map[address.Address]uint64),
|
|
||||||
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
|
|
||||||
})
|
|
||||||
|
|
||||||
addExample(http.Header{
|
addExample(http.Header{
|
||||||
"Authorization": []string{"Bearer ey.."},
|
"Authorization": []string{"Bearer ey.."},
|
||||||
@@ -459,10 +456,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
 		i = &api.GatewayStruct{}
 		t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
 		permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
-	case "Provider":
-		i = &api.LotusProviderStruct{}
-		t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem()
-		permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal))
+	case "Curio":
+		i = &api.CurioStruct{}
+		t = reflect.TypeOf(new(struct{ api.Curio })).Elem()
+		permStruct = append(permStruct, reflect.TypeOf(api.CurioStruct{}.Internal))
 	default:
 		panic("unknown type")
 	}
@@ -27,8 +27,9 @@ import (
 	auth "github.com/filecoin-project/go-jsonrpc/auth"
 	abi "github.com/filecoin-project/go-state-types/abi"
 	big "github.com/filecoin-project/go-state-types/big"
+	miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
 	paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-	miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
+	miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
 	verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	crypto "github.com/filecoin-project/go-state-types/crypto"
 	dline "github.com/filecoin-project/go-state-types/dline"
@@ -36,7 +37,7 @@ import (

 	api "github.com/filecoin-project/lotus/api"
 	apitypes "github.com/filecoin-project/lotus/api/types"
-	miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
 	ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes"
 	alerting "github.com/filecoin-project/lotus/journal/alerting"
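The alias reshuffle exists because three distinct packages would all be named "miner". After this change each alias pins one generation of the types, which is why the mock bodies below are rewritten from miner/miner0 to miner0/miner1. A compile-checked illustration of the resulting mapping:

```go
package example

import (
	miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
	miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
	miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// Each alias now refers unambiguously to one generation of "miner" types,
// matching the rewritten references throughout the mock below.
var (
	_ miner.SectorOnChainInfoFlags       = 0
	_ *miner0.SectorPreCommitOnChainInfo = nil
	_ *miner1.SectorExpiration           = nil
)
```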
@@ -2934,36 +2935,6 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
 }

-// RaftLeader mocks base method.
-func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RaftLeader", arg0)
-	ret0, _ := ret[0].(peer.ID)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// RaftLeader indicates an expected call of RaftLeader.
-func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
-}
-
-// RaftState mocks base method.
-func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RaftState", arg0)
-	ret0, _ := ret[0].(*api.RaftStateData)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// RaftState indicates an expected call of RaftState.
-func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
-}
-
 // Session mocks base method.
 func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
 	m.ctrl.T.Helper()
@@ -3639,7 +3610,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
 }

 // StateMinerInitialPledgeCollateral mocks base method.
-func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(big.Int)
@@ -3684,7 +3655,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
 }

 // StateMinerPreCommitDepositForPower mocks base method.
-func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(big.Int)
@@ -3849,10 +3820,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4
 }

 // StateSectorExpiration mocks base method.
-func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) {
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner0.SectorExpiration)
+	ret0, _ := ret[0].(*miner1.SectorExpiration)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -3879,10 +3850,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
 }

 // StateSectorPartition mocks base method.
-func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) {
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner0.SectorLocation)
+	ret0, _ := ret[0].(*miner1.SectorLocation)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -3894,10 +3865,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
 }

 // StateSectorPreCommitInfo mocks base method.
-func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) {
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorPreCommitOnChainInfo, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo)
+	ret0, _ := ret[0].(*miner0.SectorPreCommitOnChainInfo)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
api/proxy_gen.go (264 changes)
@@ -5,6 +5,8 @@ package api
 import (
 	"context"
 	"encoding/json"
+	"net/http"
+	"net/url"
 	"time"

 	"github.com/google/uuid"
@@ -113,6 +115,41 @@ type CommonNetStub struct {
 	NetStub
 }

+type CurioStruct struct {
+	Internal CurioMethods
+}
+
+type CurioMethods struct {
+	AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"`
+
+	LogList func(p0 context.Context) ([]string, error) `perm:"read"`
+
+	LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"admin"`
+
+	Shutdown func(p0 context.Context) error `perm:"admin"`
+
+	StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+	StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+	StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"`
+
+	StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"`
+
+	StorageInit func(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error `perm:"admin"`
+
+	StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"`
+
+	StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
+
+	StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`
+
+	Version func(p0 context.Context) (Version, error) `perm:"admin"`
+}
+
+type CurioStub struct {
+}
+
 type EthSubscriberStruct struct {
 	Internal EthSubscriberMethods
 }
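This is the repository's generated proxy pattern: every API interface is mirrored by a Struct whose Internal field holds one func per method (carrying the perm tag derived from the //perm: comments), plus a Stub whose methods all fail. A stripped-down illustration of the shape, using a hypothetical Ping interface that is not part of this diff:

```go
package example

import (
	"context"
	"errors"
)

var ErrNotSupported = errors.New("method not supported")

// The generated pattern in miniature: CurioStruct/CurioMethods/CurioStub
// above follow exactly this shape, one func field per interface method.
type Ping interface {
	Ping(context.Context) (string, error)
}

type PingStruct struct {
	Internal struct {
		Ping func(p0 context.Context) (string, error) `perm:"read"`
	}
}

func (s *PingStruct) Ping(p0 context.Context) (string, error) {
	if s.Internal.Ping == nil {
		// Fields get populated by the JSON-RPC client; nil means the
		// remote side never registered this method.
		return *new(string), ErrNotSupported
	}
	return s.Internal.Ping(p0)
}

type PingStub struct{}

func (s *PingStub) Ping(p0 context.Context) (string, error) {
	return *new(string), ErrNotSupported
}

// Compile-time check that the proxy satisfies the interface.
var _ Ping = new(PingStruct)
```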
@@ -457,10 +494,6 @@ type FullNodeMethods struct {
 	PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`

-	RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
-
-	RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
-
 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`

 	StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
@@ -811,6 +844,8 @@ type GatewayMethods struct {

 	StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) ``

+	StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) ``
+
 	StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) ``

 	StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) ``
@@ -849,19 +884,6 @@ type GatewayMethods struct {
 type GatewayStub struct {
 }

-type LotusProviderStruct struct {
-	Internal LotusProviderMethods
-}
-
-type LotusProviderMethods struct {
-	Shutdown func(p0 context.Context) error `perm:"admin"`
-
-	Version func(p0 context.Context) (Version, error) `perm:"admin"`
-}
-
-type LotusProviderStub struct {
-}
-
 type NetStruct struct {
 	Internal NetMethods
 }
@@ -1169,7 +1191,7 @@ type StorageMinerMethods struct {

 	StorageAuthVerify func(p0 context.Context, p1 string) ([]auth.Permission, error) `perm:"read"`

-	StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) `perm:"admin"`
+	StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) `perm:"admin"`

 	StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`

@@ -1472,6 +1494,149 @@ func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) {
 	return *new(APIVersion), ErrNotSupported
 }

+func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+	if s.Internal.AllocatePieceToSector == nil {
+		return *new(SectorOffset), ErrNotSupported
+	}
+	return s.Internal.AllocatePieceToSector(p0, p1, p2, p3, p4, p5)
+}
+
+func (s *CurioStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
+	return *new(SectorOffset), ErrNotSupported
+}
+
+func (s *CurioStruct) LogList(p0 context.Context) ([]string, error) {
+	if s.Internal.LogList == nil {
+		return *new([]string), ErrNotSupported
+	}
+	return s.Internal.LogList(p0)
+}
+
+func (s *CurioStub) LogList(p0 context.Context) ([]string, error) {
+	return *new([]string), ErrNotSupported
+}
+
+func (s *CurioStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
+	if s.Internal.LogSetLevel == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.LogSetLevel(p0, p1, p2)
+}
+
+func (s *CurioStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
+	return ErrNotSupported
+}
+
+func (s *CurioStruct) Shutdown(p0 context.Context) error {
+	if s.Internal.Shutdown == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.Shutdown(p0)
+}
+
+func (s *CurioStub) Shutdown(p0 context.Context) error {
+	return ErrNotSupported
+}
+
+func (s *CurioStruct) StorageAddLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageAddLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageAddLocal(p0, p1)
+}
+
+func (s *CurioStub) StorageAddLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
+func (s *CurioStruct) StorageDetachLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageDetachLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetachLocal(p0, p1)
+}
+
+func (s *CurioStub) StorageDetachLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
+func (s *CurioStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
+	if s.Internal.StorageFindSector == nil {
+		return *new([]storiface.SectorStorageInfo), ErrNotSupported
+	}
+	return s.Internal.StorageFindSector(p0, p1, p2, p3, p4)
+}
+
+func (s *CurioStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
+	return *new([]storiface.SectorStorageInfo), ErrNotSupported
+}
+
+func (s *CurioStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
+	if s.Internal.StorageInfo == nil {
+		return *new(storiface.StorageInfo), ErrNotSupported
+	}
+	return s.Internal.StorageInfo(p0, p1)
+}
+
+func (s *CurioStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
+	return *new(storiface.StorageInfo), ErrNotSupported
+}
+
+func (s *CurioStruct) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error {
+	if s.Internal.StorageInit == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageInit(p0, p1, p2)
+}
+
+func (s *CurioStub) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error {
+	return ErrNotSupported
+}
+
+func (s *CurioStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
+	if s.Internal.StorageList == nil {
+		return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
+	}
+	return s.Internal.StorageList(p0)
+}
+
+func (s *CurioStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
+	return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
+}
+
+func (s *CurioStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	if s.Internal.StorageLocal == nil {
+		return *new(map[storiface.ID]string), ErrNotSupported
+	}
+	return s.Internal.StorageLocal(p0)
+}
+
+func (s *CurioStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	return *new(map[storiface.ID]string), ErrNotSupported
+}
+
+func (s *CurioStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
+	if s.Internal.StorageStat == nil {
+		return *new(fsutil.FsStat), ErrNotSupported
+	}
+	return s.Internal.StorageStat(p0, p1)
+}
+
+func (s *CurioStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
+	return *new(fsutil.FsStat), ErrNotSupported
+}
+
+func (s *CurioStruct) Version(p0 context.Context) (Version, error) {
+	if s.Internal.Version == nil {
+		return *new(Version), ErrNotSupported
+	}
+	return s.Internal.Version(p0)
+}
+
+func (s *CurioStub) Version(p0 context.Context) (Version, error) {
+	return *new(Version), ErrNotSupported
+}
+
 func (s *EthSubscriberStruct) EthSubscription(p0 context.Context, p1 jsonrpc.RawParams) error {
 	if s.Internal.EthSubscription == nil {
 		return ErrNotSupported
@@ -3265,28 +3430,6 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
 	return *new(cid.Cid), ErrNotSupported
 }

-func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
-	if s.Internal.RaftLeader == nil {
-		return *new(peer.ID), ErrNotSupported
-	}
-	return s.Internal.RaftLeader(p0)
-}
-
-func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
-	return *new(peer.ID), ErrNotSupported
-}
-
-func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
-	if s.Internal.RaftState == nil {
-		return nil, ErrNotSupported
-	}
-	return s.Internal.RaftState(p0)
-}
-
-func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
-	return nil, ErrNotSupported
-}
-
 func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
 	if s.Internal.StateAccountKey == nil {
 		return *new(address.Address), ErrNotSupported
@@ -5146,6 +5289,17 @@ func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID,
 	return nil, ErrNotSupported
 }

+func (s *GatewayStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
+	if s.Internal.StateMinerDeadlines == nil {
+		return *new([]Deadline), ErrNotSupported
+	}
+	return s.Internal.StateMinerDeadlines(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
+	return *new([]Deadline), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) {
 	if s.Internal.StateMinerInfo == nil {
 		return *new(MinerInfo), ErrNotSupported
@@ -5333,28 +5487,6 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
 	return "", ErrNotSupported
 }

-func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
-	if s.Internal.Shutdown == nil {
-		return ErrNotSupported
-	}
-	return s.Internal.Shutdown(p0)
-}
-
-func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
-	return ErrNotSupported
-}
-
-func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
-	if s.Internal.Version == nil {
-		return *new(Version), ErrNotSupported
-	}
-	return s.Internal.Version(p0)
-}
-
-func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) {
-	return *new(Version), ErrNotSupported
-}
-
 func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
 	if s.Internal.ID == nil {
 		return *new(peer.ID), ErrNotSupported
@@ -6895,14 +7027,14 @@ func (s *StorageMinerStub) StorageAuthVerify(p0 context.Context, p1 string) ([]a
 	return *new([]auth.Permission), ErrNotSupported
 }

-func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
+func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) {
 	if s.Internal.StorageBestAlloc == nil {
 		return *new([]storiface.StorageInfo), ErrNotSupported
 	}
-	return s.Internal.StorageBestAlloc(p0, p1, p2, p3)
+	return s.Internal.StorageBestAlloc(p0, p1, p2, p3, p4)
 }

-func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
+func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) {
 	return *new([]storiface.StorageInfo), ErrNotSupported
 }

@@ -7580,10 +7712,10 @@ func (s *WorkerStub) WaitQuiet(p0 context.Context) error {
 var _ ChainIO = new(ChainIOStruct)
 var _ Common = new(CommonStruct)
 var _ CommonNet = new(CommonNetStruct)
+var _ Curio = new(CurioStruct)
 var _ EthSubscriber = new(EthSubscriberStruct)
 var _ FullNode = new(FullNodeStruct)
 var _ Gateway = new(GatewayStruct)
-var _ LotusProvider = new(LotusProviderStruct)
 var _ Net = new(NetStruct)
 var _ Signable = new(SignableStruct)
 var _ StorageMiner = new(StorageMinerStruct)
api/types.go (63 changes)
@@ -69,11 +69,6 @@ type MessageSendSpec struct {
 	MaximizeFeeCap bool
 }

-type MpoolMessageWhole struct {
-	Msg  *types.Message
-	Spec *MessageSendSpec
-}
-
 // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
 type GraphSyncDataTransfer struct {
 	// GraphSync request id for this transfer
@@ -353,64 +348,6 @@ type ForkUpgradeParams struct {
 	UpgradePhoenixHeight abi.ChainEpoch
 }

-type NonceMapType map[address.Address]uint64
-type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
-
-type RaftStateData struct {
-	NonceMap NonceMapType
-	MsgUuids MsgUuidMapType
-}
-
-func (n *NonceMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]uint64)
-	for a, n := range *n {
-		marshalled[a.String()] = n
-	}
-	return json.Marshal(marshalled)
-}
-
-func (n *NonceMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]uint64)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*n = make(map[address.Address]uint64)
-	for saddr, nonce := range unmarshalled {
-		a, err := address.NewFromString(saddr)
-		if err != nil {
-			return err
-		}
-		(*n)[a] = nonce
-	}
-	return nil
-}
-
-func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
-	marshalled := make(map[string]*types.SignedMessage)
-	for u, msg := range *m {
-		marshalled[u.String()] = msg
-	}
-	return json.Marshal(marshalled)
-}
-
-func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
-	unmarshalled := make(map[string]*types.SignedMessage)
-	err := json.Unmarshal(b, &unmarshalled)
-	if err != nil {
-		return err
-	}
-	*m = make(map[uuid.UUID]*types.SignedMessage)
-	for suid, msg := range unmarshalled {
-		u, err := uuid.Parse(suid)
-		if err != nil {
-			return err
-		}
-		(*m)[u] = msg
-	}
-	return nil
-}
-
 // ChainExportConfig holds configuration for chain ranged exports.
 type ChainExportConfig struct {
 	WriteBufferSize int
|
@@ -293,7 +293,7 @@ type FullNode interface {
 	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
 	// WalletDefaultAddress returns the address marked as default in the wallet.
 	WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
-	// WalletSetDefault marks the given address as as the default one.
+	// WalletSetDefault marks the given address as the default one.
 	WalletSetDefault(context.Context, address.Address) error //perm:write
 	// WalletExport returns the private key of an address in the wallet.
 	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@@ -26,8 +26,9 @@ import (
 	auth "github.com/filecoin-project/go-jsonrpc/auth"
 	abi "github.com/filecoin-project/go-state-types/abi"
 	big "github.com/filecoin-project/go-state-types/big"
+	miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
 	paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-	miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
+	miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
 	verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	crypto "github.com/filecoin-project/go-state-types/crypto"
 	dline "github.com/filecoin-project/go-state-types/dline"
@@ -36,7 +37,7 @@ import (
 	api "github.com/filecoin-project/lotus/api"
 	apitypes "github.com/filecoin-project/lotus/api/types"
 	v0api "github.com/filecoin-project/lotus/api/v0api"
-	miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
 	alerting "github.com/filecoin-project/lotus/journal/alerting"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
@@ -2699,7 +2700,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
 }

 // StateMinerInitialPledgeCollateral mocks base method.
-func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(big.Int)
@@ -2744,7 +2745,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
 }

 // StateMinerPreCommitDepositForPower mocks base method.
-func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
 	ret0, _ := ret[0].(big.Int)
@@ -2924,10 +2925,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 inter
 }

 // StateSectorExpiration mocks base method.
-func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) {
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner0.SectorExpiration)
+	ret0, _ := ret[0].(*miner1.SectorExpiration)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -2954,10 +2955,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
 }

 // StateSectorPartition mocks base method.
-func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) {
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner0.SectorLocation)
+	ret0, _ := ret[0].(*miner1.SectorLocation)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -2969,10 +2970,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
 }

 // StateSectorPreCommitInfo mocks base method.
-func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner0.SectorPreCommitOnChainInfo, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo)
+	ret0, _ := ret[0].(miner0.SectorPreCommitOnChainInfo)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@@ -13,4 +13,4 @@ func PermissionedFullAPI(a FullNode) FullNode {
 	return api.PermissionedFullAPI(a)
 }

-type LotusProviderStruct = api.LotusProviderStruct
+type CurioStruct = api.CurioStruct
@@ -60,7 +60,7 @@ var (
 	MinerAPIVersion0  = newVer(1, 5, 0)
 	WorkerAPIVersion0 = newVer(1, 7, 0)

-	ProviderAPIVersion0 = newVer(1, 0, 0)
+	CurioAPIVersion0 = newVer(1, 0, 0)
 )

 //nolint:varcheck,deadcode

blockstore/cached.go (new file)
@@ -0,0 +1,113 @@
+package blockstore
+
+import (
+	"context"
+
+	blocks "github.com/ipfs/go-block-format"
+	"github.com/ipfs/go-cid"
+)
+
+// BlockstoreCache is a cache for blocks, compatible with lru.Cache; Must be safe for concurrent access
+type BlockstoreCache interface {
+	Remove(mhString MhString) bool
+	Contains(mhString MhString) bool
+	Get(mhString MhString) (blocks.Block, bool)
+	Add(mhString MhString, block blocks.Block) (evicted bool)
+}
+
+type ReadCachedBlockstore struct {
+	top   Blockstore
+	cache BlockstoreCache
+}
+
+type MhString string
+
+func NewReadCachedBlockstore(top Blockstore, cache BlockstoreCache) *ReadCachedBlockstore {
+	return &ReadCachedBlockstore{
+		top:   top,
+		cache: cache,
+	}
+}
+
+func (c *ReadCachedBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
+	c.cache.Remove(MhString(cid.Hash()))
+	return c.top.DeleteBlock(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
+	if c.cache.Contains(MhString(cid.Hash())) {
+		return true, nil
+	}
+
+	return c.top.Has(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
+	if out, ok := c.cache.Get(MhString(cid.Hash())); ok {
+		return out, nil
+	}
+
+	out, err := c.top.Get(ctx, cid)
+	if err != nil {
+		return nil, err
+	}
+
+	c.cache.Add(MhString(cid.Hash()), out)
+	return out, nil
+}
+
+func (c *ReadCachedBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
+	if b, ok := c.cache.Get(MhString(cid.Hash())); ok {
+		return len(b.RawData()), nil
+	}
+
+	return c.top.GetSize(ctx, cid)
+}
+
+func (c *ReadCachedBlockstore) Put(ctx context.Context, block blocks.Block) error {
+	c.cache.Add(MhString(block.Cid().Hash()), block)
+	return c.top.Put(ctx, block)
+}
+
+func (c *ReadCachedBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
+	for _, b := range blocks {
+		c.cache.Add(MhString(b.Cid().Hash()), b)
+	}
+
+	return c.top.PutMany(ctx, blocks)
+}
+
+func (c *ReadCachedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+	return c.top.AllKeysChan(ctx)
+}
+
+func (c *ReadCachedBlockstore) HashOnRead(enabled bool) {
+	c.top.HashOnRead(enabled)
+}
+
+func (c *ReadCachedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
+	return c.top.View(ctx, cid, func(bb []byte) error {
+		blk, err := blocks.NewBlockWithCid(bb, cid)
+		if err != nil {
+			return err
+		}
+
+		c.cache.Add(MhString(cid.Hash()), blk)
+
+		return callback(bb)
+	})
+}
+
+func (c *ReadCachedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
+	for _, ci := range cids {
+		c.cache.Remove(MhString(ci.Hash()))
+	}
+
+	return c.top.DeleteMany(ctx, cids)
+}
+
+func (c *ReadCachedBlockstore) Flush(ctx context.Context) error {
+	return c.top.Flush(ctx)
+}
+
+var _ Blockstore = (*ReadCachedBlockstore)(nil)
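A minimal usage sketch (not part of this diff): the BlockstoreCache interface lines up with the generics API of hashicorp/golang-lru v2, so a bounded read cache could be wired up roughly like this — the 4096-entry capacity is an arbitrary assumption:

	package cachedemo

	import (
		lru "github.com/hashicorp/golang-lru/v2"
		blocks "github.com/ipfs/go-block-format"

		"github.com/filecoin-project/lotus/blockstore"
	)

	func newCachedStore(top blockstore.Blockstore) (blockstore.Blockstore, error) {
		// *lru.Cache[MhString, blocks.Block] provides Remove/Contains/Get/Add
		// with exactly the signatures BlockstoreCache expects.
		cache, err := lru.New[blockstore.MhString, blocks.Block](4096) // capacity is an assumption
		if err != nil {
			return nil, err
		}
		return blockstore.NewReadCachedBlockstore(top, cache), nil
	}

Keying the cache on the multihash (MhString) rather than the full CID means the same block payload is shared across CIDs that differ only in codec.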
@@ -1,154 +0,0 @@
-package blockstore
-
-import (
-	"bytes"
-	"context"
-	"io"
-
-	"github.com/ipfs/boxo/path"
-	blocks "github.com/ipfs/go-block-format"
-	"github.com/ipfs/go-cid"
-	"github.com/multiformats/go-multiaddr"
-	"github.com/multiformats/go-multihash"
-	"golang.org/x/xerrors"
-
-	rpc "github.com/filecoin-project/kubo-api-client"
-	iface "github.com/filecoin-project/kubo-api-client/coreiface"
-	"github.com/filecoin-project/kubo-api-client/coreiface/options"
-)
-
-type IPFSBlockstore struct {
-	ctx             context.Context
-	api, offlineAPI iface.CoreAPI
-}
-
-var _ BasicBlockstore = (*IPFSBlockstore)(nil)
-
-func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
-	localApi, err := rpc.NewLocalApi()
-	if err != nil {
-		return nil, xerrors.Errorf("getting local ipfs api: %w", err)
-	}
-	api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
-	if err != nil {
-		return nil, xerrors.Errorf("setting offline mode: %s", err)
-	}
-
-	offlineAPI := api
-	if onlineMode {
-		offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
-		if err != nil {
-			return nil, xerrors.Errorf("applying offline mode: %s", err)
-		}
-	}
-
-	bs := &IPFSBlockstore{
-		ctx:        ctx,
-		api:        api,
-		offlineAPI: offlineAPI,
-	}
-
-	return Adapt(bs), nil
-}
-
-func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
-	httpApi, err := rpc.NewApi(maddr)
-	if err != nil {
-		return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
-	}
-	api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
-	if err != nil {
-		return nil, xerrors.Errorf("applying offline mode: %s", err)
-	}
-
-	offlineAPI := api
-	if onlineMode {
-		offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
-		if err != nil {
-			return nil, xerrors.Errorf("applying offline mode: %s", err)
-		}
-	}
-
-	bs := &IPFSBlockstore{
-		ctx:        ctx,
-		api:        api,
-		offlineAPI: offlineAPI,
-	}
-
-	return Adapt(bs), nil
-}
-
-func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
-	return xerrors.Errorf("not supported")
-}
-
-func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
-	_, err := i.offlineAPI.Block().Stat(ctx, path.FromCid(cid))
-	if err != nil {
-		// The underlying client is running in Offline mode.
-		// Stat() will fail with an err if the block isn't in the
-		// blockstore. If that's the case, return false without
-		// an error since that's the original intention of this method.
-		if err.Error() == "blockservice: key not found" {
-			return false, nil
-		}
-		return false, xerrors.Errorf("getting ipfs block: %w", err)
-	}
-
-	return true, nil
-}
-
-func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
-	rd, err := i.api.Block().Get(ctx, path.FromCid(cid))
-	if err != nil {
-		return nil, xerrors.Errorf("getting ipfs block: %w", err)
-	}
-
-	data, err := io.ReadAll(rd)
-	if err != nil {
-		return nil, err
-	}
-
-	return blocks.NewBlockWithCid(data, cid)
-}
-
-func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
-	st, err := i.api.Block().Stat(ctx, path.FromCid(cid))
-	if err != nil {
-		return 0, xerrors.Errorf("getting ipfs block: %w", err)
-	}
-
-	return st.Size(), nil
-}
-
-func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error {
-	mhd, err := multihash.Decode(block.Cid().Hash())
-	if err != nil {
-		return err
-	}
-
-	_, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()),
-		options.Block.Hash(mhd.Code, mhd.Length),
-		options.Block.Format(multihash.Codes[block.Cid().Type()]))
-	return err
-}
-
-func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
-	// TODO: could be done in parallel
-
-	for _, block := range blocks {
-		if err := i.Put(ctx, block); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
-	return nil, xerrors.Errorf("not supported")
-}
-
-func (i *IPFSBlockstore) HashOnRead(enabled bool) {
-	return // TODO: We could technically support this, but..
-}
@@ -145,10 +145,10 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
 	)

 	if !strings.HasPrefix(bundle, "v") {
-		return nil, xerrors.Errorf("bundle bundle '%q' doesn't start with a 'v'", bundle)
+		return nil, xerrors.Errorf("bundle '%q' doesn't start with a 'v'", bundle)
 	}
 	if !strings.HasSuffix(bundle, archiveExt) {
-		return nil, xerrors.Errorf("bundle bundle '%q' doesn't end with '%s'", bundle, archiveExt)
+		return nil, xerrors.Errorf("bundle '%q' doesn't end with '%s'", bundle, archiveExt)
 	}
 	version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
 	if err != nil {
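For reference, the filename parsing these error messages guard reduces to a few standard-library calls; a stand-alone sketch (archiveExt matching the v%v.tar.zst bundle names used elsewhere in this diff):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	const archiveExt = ".tar.zst"

	func bundleVersion(bundle string) (int64, error) {
		if !strings.HasPrefix(bundle, "v") {
			return 0, fmt.Errorf("bundle %q doesn't start with a 'v'", bundle)
		}
		if !strings.HasSuffix(bundle, archiveExt) {
			return 0, fmt.Errorf("bundle %q doesn't end with %q", bundle, archiveExt)
		}
		// strip the leading "v" and the extension, leaving just the number
		return strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
	}

	func main() {
		v, err := bundleVersion("v13.tar.zst")
		fmt.Println(v, err) // 13 <nil>
	}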

build/builtin_actors_gen_test.go (new file)
@@ -0,0 +1,107 @@
+//go:build release
+// +build release
+
+package build_test
+
+import (
+	"archive/tar"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/DataDog/zstd"
+	"github.com/ipfs/go-cid"
+	"github.com/ipld/go-car/v2"
+	"github.com/stretchr/testify/require"
+
+	actorstypes "github.com/filecoin-project/go-state-types/actors"
+
+	"github.com/filecoin-project/lotus/build"
+)
+
+func TestEmbeddedBuiltinActorsMetadata(t *testing.T) {
+	subjectsByVersionByNetworks := make(map[actorstypes.Version]map[string]*build.BuiltinActorsMetadata)
+	for _, subject := range build.EmbeddedBuiltinActorsMetadata {
+		if subject.BundleGitTag == "" {
+			// BundleGitTag is required to verify the SHA-256 checksum.
+			// The pack script only includes this for the latest network version, and it is good enough to only
+			// check the latest network version metadata. Hence the skip.
+			continue
+		}
+		v, ok := subjectsByVersionByNetworks[subject.Version]
+		if !ok {
+			v = make(map[string]*build.BuiltinActorsMetadata)
+		}
+		v[subject.Network] = subject
+		subjectsByVersionByNetworks[subject.Version] = v
+	}
+
+	for version, networks := range subjectsByVersionByNetworks {
+		cachedCar, err := os.Open(fmt.Sprintf("./actors/v%v.tar.zst", version))
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, cachedCar.Close()) })
+		tarReader := tar.NewReader(zstd.NewReader(cachedCar))
+		for {
+			header, err := tarReader.Next()
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			require.NoError(t, err)
+
+			network := strings.TrimSuffix(strings.TrimPrefix(header.Name, "builtin-actors-"), ".car")
+			subject, found := networks[network]
+			if !found {
+				continue
+			}
+
+			shaURL := fmt.Sprintf("https://github.com/filecoin-project/builtin-actors/releases/download/%s/builtin-actors-%s.sha256", subject.BundleGitTag, subject.Network)
+			resp, err := http.Get(shaURL)
+			require.NoError(t, err, "failed to retrieve CAR SHA")
+			require.Equal(t, http.StatusOK, resp.StatusCode, "unexpected response status code while retrieving CAR SHA")
+
+			respBody, err := io.ReadAll(resp.Body)
+			require.NoError(t, resp.Body.Close())
+			require.NoError(t, err)
+			fields := strings.Fields(string(respBody))
+			require.Len(t, fields, 2)
+			wantShaHex := fields[0]
+
+			hasher := sha256.New()
+			reader, err := car.NewBlockReader(io.TeeReader(tarReader, hasher))
+			require.NoError(t, err)
+
+			require.EqualValues(t, 1, reader.Version)
+			require.Len(t, reader.Roots, 1, "expected exactly one root CID for builtin actors bundle network %s, version %v", subject.Network, subject.Version)
+			require.True(t, reader.Roots[0].Equals(subject.ManifestCid), "manifest CID does not match")
+
+			subjectActorsByCid := make(map[cid.Cid]string)
+			for name, c := range subject.Actors {
+				subjectActorsByCid[c] = name
+			}
+			for {
+				next, err := reader.Next()
+				if errors.Is(err, io.EOF) {
+					break
+				}
+				require.NoError(t, err)
+				name, found := subjectActorsByCid[next.Cid()]
+				if found {
+					t.Logf("OK: %sv%v/%s -> %s", subject.Network, subject.Version, name, next.Cid())
+					delete(subjectActorsByCid, next.Cid())
+				}
+			}
+			require.Empty(t, subjectActorsByCid, "ZST CAR bundle did not contain CIDs for all actors; missing: %v", subjectActorsByCid)
+
+			gotShaHex := hex.EncodeToString(hasher.Sum(nil))
+			require.Equal(t, wantShaHex, gotShaHex, "SHA-256 digest of ZST CAR bundle does not match builtin-actors release")
+			delete(networks, network)
+		}
+		require.Empty(t, networks, "CAR bundle did not contain CIDs for network; missing: %v", networks)
+	}
+}
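The checksum technique in this test — hashing the CAR stream while the CAR reader consumes it — is plain io.TeeReader; a self-contained sketch:

	package main

	import (
		"crypto/sha256"
		"encoding/hex"
		"fmt"
		"io"
		"strings"
	)

	func main() {
		src := strings.NewReader("example payload") // stands in for the tar entry
		hasher := sha256.New()
		// every byte read through tee is also written to hasher, so one pass
		// over the stream yields both the parsed content and its digest
		tee := io.TeeReader(src, hasher)
		data, _ := io.ReadAll(tee) // stands in for car.NewBlockReader consuming the stream
		fmt.Println(len(data), hex.EncodeToString(hasher.Sum(nil)))
	}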
@@ -67,12 +67,10 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
 		Servers: []string{
 			"https://pl-eu.testnet.drand.sh",
 			"https://pl-us.testnet.drand.sh",
-			"https://pl-sin.testnet.drand.sh",
 		},
 		Relays: []string{
 			"/dnsaddr/pl-eu.testnet.drand.sh/",
 			"/dnsaddr/pl-us.testnet.drand.sh/",
-			"/dnsaddr/pl-sin.testnet.drand.sh/",
 		},
 		IsChained: true,
 		ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
@@ -2,7 +2,6 @@ package build

 import (
 	"bytes"
-	"compress/gzip"
 	"embed"
 	"encoding/json"

@@ -12,17 +11,9 @@ import (
 //go:embed openrpc
 var openrpcfs embed.FS

-func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
-	zr, err := gzip.NewReader(bytes.NewBuffer(data))
-	if err != nil {
-		log.Fatal(err)
-	}
+func mustReadOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
 	m := apitypes.OpenRPCDocument{}
-	err = json.NewDecoder(zr).Decode(&m)
-	if err != nil {
-		log.Fatal(err)
-	}
-	err = zr.Close()
+	err := json.NewDecoder(bytes.NewBuffer(data)).Decode(&m)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -30,33 +21,33 @@ func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
 }

 func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
-	data, err := openrpcfs.ReadFile("openrpc/full.json.gz")
+	data, err := openrpcfs.ReadFile("openrpc/full.json")
 	if err != nil {
 		panic(err)
 	}
-	return mustReadGzippedOpenRPCDocument(data)
+	return mustReadOpenRPCDocument(data)
 }

 func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
-	data, err := openrpcfs.ReadFile("openrpc/miner.json.gz")
+	data, err := openrpcfs.ReadFile("openrpc/miner.json")
 	if err != nil {
 		panic(err)
 	}
-	return mustReadGzippedOpenRPCDocument(data)
+	return mustReadOpenRPCDocument(data)
 }

 func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
-	data, err := openrpcfs.ReadFile("openrpc/worker.json.gz")
+	data, err := openrpcfs.ReadFile("openrpc/worker.json")
 	if err != nil {
 		panic(err)
 	}
-	return mustReadGzippedOpenRPCDocument(data)
+	return mustReadOpenRPCDocument(data)
 }

 func OpenRPCDiscoverJSON_Gateway() apitypes.OpenRPCDocument {
-	data, err := openrpcfs.ReadFile("openrpc/gateway.json.gz")
+	data, err := openrpcfs.ReadFile("openrpc/gateway.json")
 	if err != nil {
 		panic(err)
 	}
-	return mustReadGzippedOpenRPCDocument(data)
+	return mustReadOpenRPCDocument(data)
 }
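With the gzip layer gone, the pattern is just go:embed plus a JSON decode; a toy equivalent (assumes an openrpc/ directory sits next to the source file):

	package main

	import (
		"embed"
		"encoding/json"
		"fmt"
		"log"
	)

	//go:embed openrpc
	var docs embed.FS

	func main() {
		data, err := docs.ReadFile("openrpc/full.json")
		if err != nil {
			log.Fatal(err)
		}
		var m map[string]interface{}
		if err := json.Unmarshal(data, &m); err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(m), "top-level keys")
	}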

build/openrpc/full.json (new file, 26867 lines; diff too large to display)
build/openrpc/gateway.json (new file, 10157 lines; diff too large to display)
build/openrpc/miner.json (new file, 12744 lines; diff too large to display)
build/openrpc/worker.json (new file, 5536 lines; diff too large to display)
@@ -37,7 +37,11 @@ func BuildTypeString() string {
 }

 // BuildVersion is the local build version
+<<<<<<< HEAD
 const BuildVersion = "1.26.3"
+=======
+const BuildVersion = "1.27.0-dev"
+>>>>>>> tags/v1.27.0-rc1

 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -153,7 +153,7 @@ type Partition interface {
 	UnprovenSectors() (bitfield.BitField, error)
 }

-type SectorOnChainInfo = minertypes.SectorOnChainInfo
+type SectorOnChainInfo = minertypes13.SectorOnChainInfo

 func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
 	// We added support for the new proofs in network version 7, and removed support for the old
@@ -256,6 +256,7 @@ type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
 type SectorActivationManifest = minertypes13.SectorActivationManifest
 type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
 type SectorUpdateManifest = minertypes13.SectorUpdateManifest
+type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags

 var QAPowerMax = minertypes.QAPowerMax

@@ -219,7 +219,7 @@ type Partition interface {
 	UnprovenSectors() (bitfield.BitField, error)
 }

-type SectorOnChainInfo = minertypes.SectorOnChainInfo
+type SectorOnChainInfo = minertypes13.SectorOnChainInfo

 func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
 	// We added support for the new proofs in network version 7, and removed support for the old
@@ -322,6 +322,7 @@ type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
 type SectorActivationManifest = minertypes13.SectorActivationManifest
 type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
 type SectorUpdateManifest = minertypes13.SectorUpdateManifest
+type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags

 var QAPowerMax = minertypes.QAPowerMax
@@ -584,6 +584,10 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO
 		{{if (ge .v 7)}}
 		SectorKeyCID: v{{.v}}.SectorKeyCID,
 		{{end}}
+		{{if (ge .v 12)}}
+		PowerBaseEpoch:    v{{.v}}.PowerBaseEpoch,
+		ReplacedDayReward: v{{.v}}.ReplacedDayReward,
+		{{end}}
 	}
 	return info
 }

chain/actors/builtin/miner/v12.go (generated)
@@ -545,6 +545,9 @@ func fromV12SectorOnChainInfo(v12 miner12.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedStoragePledge: v12.ExpectedStoragePledge,

 		SectorKeyCID: v12.SectorKeyCID,
+
+		PowerBaseEpoch:    v12.PowerBaseEpoch,
+		ReplacedDayReward: v12.ReplacedDayReward,
 	}
 	return info
 }

chain/actors/builtin/miner/v13.go (generated)
@@ -545,6 +545,9 @@ func fromV13SectorOnChainInfo(v13 miner13.SectorOnChainInfo) SectorOnChainInfo {
 		ExpectedStoragePledge: v13.ExpectedStoragePledge,

 		SectorKeyCID: v13.SectorKeyCID,
+
+		PowerBaseEpoch:    v13.PowerBaseEpoch,
+		ReplacedDayReward: v13.ReplacedDayReward,
 	}
 	return info
 }
@@ -203,7 +203,6 @@ func (db *DrandBeacon) VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte)
 	}

 	db.cacheValue(entry)
-
 	return nil
 }

@@ -3,6 +3,7 @@
 package drand

 import (
+	"bytes"
 	"context"
 	"os"
 	"testing"
@@ -18,7 +19,12 @@ import (

 func TestPrintGroupInfo(t *testing.T) {
 	server := build.DrandConfigs[build.DrandTestnet].Servers[0]
-	c, err := hclient.New(server, nil, nil)
+	chainInfo := build.DrandConfigs[build.DrandTestnet].ChainInfoJSON
+
+	drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(chainInfo)))
+	assert.NoError(t, err)
+	c, err := hclient.NewWithInfo(server, drandChain, nil)
+
 	assert.NoError(t, err)
 	cg := c.(interface {
 		FetchChainInfo(ctx context.Context, groupHash []byte) (*dchain.Info, error)
@@ -24,8 +24,15 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
 		ts = tss[0]
 	}

-	if err := syncer.switchChain(ctx, ts); err != nil {
-		return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err)
+	hts := syncer.ChainStore().GetHeaviestTipSet()
+	if hts.Equals(ts) {
+		// Current head, no need to switch.
+	} else if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err != nil {
+		return xerrors.Errorf("failed to walk the chain when checkpointing: %w", err)
+	} else if anc {
+		// New checkpoint is on the current chain, we definitely have the tipsets.
+	} else if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
+		return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
 	}

 	if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil {
@@ -34,24 +41,3 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e

 	return nil
 }
-
-func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error {
-	hts := syncer.ChainStore().GetHeaviestTipSet()
-	if hts.Equals(ts) {
-		return nil
-	}
-
-	if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc {
-		return nil
-	}
-
-	// Otherwise, sync the chain and set the head.
-	if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
-		return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
-	}
-
-	if err := syncer.ChainStore().SetHead(ctx, ts); err != nil {
-		return xerrors.Errorf("failed to set the chain head: %w", err)
-	}
-	return nil
-}
@@ -220,7 +220,7 @@ func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.C
 	// the sender exists and is an account actor, and the nonces make sense
 	var sender address.Address
 	if nv >= network.Version13 {
-		sender, err = st.LookupID(m.From)
+		sender, err = st.LookupIDAddress(m.From)
 		if err != nil {
 			return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err)
 		}
@@ -150,7 +150,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 		return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, consensus.ErrTemporal)
 	}
 	if h.Timestamp > now {
-		log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
+		log.Warnf("Got block from the future, but within threshold (%d > %d)", h.Timestamp, now)
 	}

 	minerCheck := async.Err(func() error {
@@ -166,7 +166,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
 	}

 	if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
-		return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
+		return xerrors.Errorf("parent weight different: %s (header) != %s (computed)",
 			b.Header.ParentWeight, pweight)
 	}

@@ -81,7 +81,7 @@ func init() {
 		return
 	}
 	// use value from environment
-	log.Infof("migration worker cound set from %s (%d)", EnvMigrationMaxWorkerCount, mwc)
+	log.Infof("migration worker count set from %s (%d)", EnvMigrationMaxWorkerCount, mwc)
 	MigrationMaxWorkerCount = int(mwc)
 	return
 }
@@ -1712,14 +1712,14 @@ func upgradeActorsV10Common(
 	if stateRoot.Version != types.StateTreeVersion4 {
 		return cid.Undef, xerrors.Errorf(
-			"expected state root version 4 for actors v9 upgrade, got %d",
+			"expected state root version 4 for actors v10 upgrade, got %d",
 			stateRoot.Version,
 		)
 	}

 	manifest, ok := actors.GetManifest(actorstypes.Version10)
 	if !ok {
-		return cid.Undef, xerrors.Errorf("no manifest CID for v9 upgrade")
+		return cid.Undef, xerrors.Errorf("no manifest CID for v10 upgrade")
 	}

 	// Perform the migration
@ -1893,7 +1893,7 @@ func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M
|
|||||||
}
|
}
|
||||||
newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
|
newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
|
return cid.Undef, xerrors.Errorf("migrating actors v12 state: %w", err)
|
||||||
}
|
}
|
||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
@ -2210,7 +2210,7 @@ func UpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M
|
|||||||
}
|
}
|
||||||
newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config)
|
newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
|
return cid.Undef, xerrors.Errorf("migrating actors v13 state: %w", err)
|
||||||
}
|
}
|
||||||
return newRoot, nil
|
return newRoot, nil
|
||||||
}
|
}
|
||||||
@@ -375,6 +375,10 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet)
 func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address,
 	keysWithCodec map[string][]types.ActorEventBlock, excludeReverted bool) (EventFilter, error) {
 	m.mu.Lock()
+	if m.currentHeight == 0 {
+		// sync in progress, we haven't had an Apply
+		m.currentHeight = m.ChainStore.GetHeaviestTipSet().Height()
+	}
 	currentHeight := m.currentHeight
 	m.mu.Unlock()

@@ -26,7 +26,7 @@ var pragmas = []string{
 	"PRAGMA temp_store = memory",
 	"PRAGMA mmap_size = 30000000000",
 	"PRAGMA page_size = 32768",
-	"PRAGMA auto_vacuum = NONE",
+	"PRAGMA auto_vacuum = NONE", // not useful until we implement GC
 	"PRAGMA automatic_index = OFF",
 	"PRAGMA journal_mode = WAL",
 	"PRAGMA read_uncommitted = ON",
@@ -45,8 +45,10 @@ var ddls = []string{
 		reverted INTEGER NOT NULL
 	)`,

-	`CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`,
-	`CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`,
+	createIndexEventEmitterAddr,
+	createIndexEventTipsetKeyCid,
+	createIndexEventHeight,
+	createIndexEventReverted,

 	`CREATE TABLE IF NOT EXISTS event_entry (
 		event_id INTEGER,
@@ -57,7 +59,9 @@ var ddls = []string{
 		value BLOB NOT NULL
 	)`,

-	`CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)`,
+	createIndexEventEntryIndexedKey,
+	createIndexEventEntryCodecValue,
+	createIndexEventEntryEventId,

 	// metadata containing version of schema
 	`CREATE TABLE IF NOT EXISTS _meta (
@@ -67,6 +71,7 @@ var ddls = []string{
 	`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
 	`INSERT OR IGNORE INTO _meta (version) VALUES (2)`,
 	`INSERT OR IGNORE INTO _meta (version) VALUES (3)`,
+	`INSERT OR IGNORE INTO _meta (version) VALUES (4)`,
 }

 var (
@@ -74,13 +79,22 @@ var (
 )

 const (
-	schemaVersion = 3
+	schemaVersion = 4

 	eventExists          = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
 	insertEvent          = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)`
 	insertEntry          = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)`
 	revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?`
 	restoreEvent         = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
+
+	createIndexEventEmitterAddr  = `CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`
+	createIndexEventTipsetKeyCid = `CREATE INDEX IF NOT EXISTS event_tipset_key_cid ON event (tipset_key_cid);`
+	createIndexEventHeight       = `CREATE INDEX IF NOT EXISTS event_height ON event (height);`
+	createIndexEventReverted     = `CREATE INDEX IF NOT EXISTS event_reverted ON event (reverted);`
+
+	createIndexEventEntryIndexedKey = `CREATE INDEX IF NOT EXISTS event_entry_indexed_key ON event_entry (indexed, key);`
+	createIndexEventEntryCodecValue = `CREATE INDEX IF NOT EXISTS event_entry_codec_value ON event_entry (codec, value);`
+	createIndexEventEntryEventId    = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);`
 )

 type EventIndex struct {
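The composite indexes exist because the event filter's WHERE clauses constrain those column pairs together, letting SQLite satisfy the lookup from the index alone. A stand-alone check of that claim (the mattn/go-sqlite3 driver is an assumption; any SQLite driver works):

	package main

	import (
		"database/sql"
		"fmt"
		"log"

		_ "github.com/mattn/go-sqlite3"
	)

	func main() {
		db, err := sql.Open("sqlite3", ":memory:")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		for _, stmt := range []string{
			`CREATE TABLE event_entry (event_id INTEGER, indexed INTEGER, key TEXT, codec INTEGER, value BLOB)`,
			// mirrors createIndexEventEntryIndexedKey above
			`CREATE INDEX event_entry_indexed_key ON event_entry (indexed, key)`,
		} {
			if _, err := db.Exec(stmt); err != nil {
				log.Fatal(err)
			}
		}

		// EXPLAIN QUERY PLAN reports whether the composite index is used.
		var id, parent, notused int
		var detail string
		row := db.QueryRow(`EXPLAIN QUERY PLAN SELECT event_id FROM event_entry WHERE indexed = 1 AND key = ?`, "topic1")
		if err := row.Scan(&id, &parent, &notused, &detail); err != nil {
			log.Fatal(err)
		}
		fmt.Println(detail) // e.g. SEARCH event_entry USING INDEX event_entry_indexed_key (indexed=? AND key=?)
	}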
@@ -125,43 +139,43 @@ func (ei *EventIndex) initStatements() (err error) {
 func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.ChainStore) error {
 	now := time.Now()

-	tx, err := ei.db.Begin()
+	tx, err := ei.db.BeginTx(ctx, nil)
 	if err != nil {
 		return xerrors.Errorf("begin transaction: %w", err)
 	}
 	// rollback the transaction (a no-op if the transaction was already committed)
-	defer tx.Rollback() //nolint:errcheck
+	defer func() { _ = tx.Rollback() }()

 	// create some temporary indices to help speed up the migration
-	_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
+	_, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
 	if err != nil {
 		return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err)
 	}
-	_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
+	_, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
 	if err != nil {
 		return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
 	}

-	stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? and height=?")
+	stmtDeleteOffChainEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid!=? and height=?")
 	if err != nil {
 		return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err)
 	}

-	stmtSelectEvent, err := tx.Prepare("SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1")
+	stmtSelectEvent, err := tx.PrepareContext(ctx, "SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1")
 	if err != nil {
 		return xerrors.Errorf("prepare stmtSelectEvent: %w", err)
 	}

-	stmtDeleteEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid=? AND id<?")
+	stmtDeleteEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid=? AND id<?")
 	if err != nil {
 		return xerrors.Errorf("prepare stmtDeleteEvent: %w", err)
 	}

 	// get the lowest height tipset
 	var minHeight sql.NullInt64
-	err = ei.db.QueryRow("SELECT MIN(height) FROM event").Scan(&minHeight)
+	err = ei.db.QueryRowContext(ctx, "SELECT MIN(height) FROM event").Scan(&minHeight)
 	if err != nil {
-		if err == sql.ErrNoRows {
+		if errors.Is(err, sql.ErrNoRows) {
 			return nil
 		}

@@ -198,7 +212,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
 	var eventId sql.NullInt64
 	err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId)
 	if err != nil {
-		if err == sql.ErrNoRows {
+		if errors.Is(err, sql.ErrNoRows) {
 			continue
 		}
 		return xerrors.Errorf("select event: %w", err)
@@ -224,7 +238,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C

 	// delete all entries that have an event_id that doesn't exist (since we don't have a foreign
 	// key constraint that gives us cascading deletes)
-	res, err := tx.Exec("DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)")
+	res, err := tx.ExecContext(ctx, "DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)")
 	if err != nil {
 		return xerrors.Errorf("delete event_entry: %w", err)
 	}
@@ -233,39 +247,143 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
 	if err != nil {
 		return xerrors.Errorf("rows affected: %w", err)
 	}
-	log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
+	log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected)

 	// drop the temporary indices after the migration
-	_, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid")
+	_, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid")
 	if err != nil {
-		return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
+		return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err)
 	}
-	_, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
+	_, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
 	if err != nil {
 		return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
 	}

+	// original v2 migration introduced an index:
+	//	CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)
+	// which has subsequently been removed in v4, so it's omitted here
+
+	// increment the schema version to 2 in _meta table.
+	_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (2)")
+	if err != nil {
+		return xerrors.Errorf("increment _meta version: %w", err)
+	}
+
 	err = tx.Commit()
 	if err != nil {
 		return xerrors.Errorf("commit transaction: %w", err)
 	}

-	// during the migration, we have likely increased the WAL size a lot, so lets do some
+	log.Infof("Successfully migrated event index from version 1 to version 2 in %s", time.Since(now))
+
+	return nil
+}
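All three migrations share the same transaction idiom: BeginTx, a deferred Rollback that is a no-op once Commit succeeds, then Commit. Distilled to standard library only (runMigration is an illustrative name, not a Lotus function):

	package migratedemo

	import (
		"context"
		"database/sql"
		"fmt"
	)

	// runMigration sketches the shape shared by migrateToVersion2/3/4: every
	// statement runs inside one transaction, and the deferred Rollback is
	// harmless on the success path because it follows a committed transaction.
	func runMigration(ctx context.Context, db *sql.DB, stmts []string) error {
		tx, err := db.BeginTx(ctx, nil)
		if err != nil {
			return fmt.Errorf("begin transaction: %w", err)
		}
		defer func() { _ = tx.Rollback() }()

		for _, s := range stmts {
			if _, err := tx.ExecContext(ctx, s); err != nil {
				return fmt.Errorf("exec %q: %w", s, err)
			}
		}
		return tx.Commit()
	}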
+
+// migrateToVersion3 migrates the schema from version 2 to version 3 by creating two indices:
+// 1) an index on the event.emitter_addr column, and 2) an index on the event_entry.key column.
+func (ei *EventIndex) migrateToVersion3(ctx context.Context) error {
+	now := time.Now()
+
+	tx, err := ei.db.BeginTx(ctx, nil)
+	if err != nil {
+		return xerrors.Errorf("begin transaction: %w", err)
+	}
+	defer func() { _ = tx.Rollback() }()
+
+	// create index on event.emitter_addr.
+	_, err = tx.ExecContext(ctx, createIndexEventEmitterAddr)
+	if err != nil {
+		return xerrors.Errorf("create index event_emitter_addr: %w", err)
+	}
+
+	// original v3 migration introduced an index:
+	//	CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)
+	// which has subsequently been removed in v4, so it's omitted here
+
+	// increment the schema version to 3 in _meta table.
+	_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (3)")
+	if err != nil {
+		return xerrors.Errorf("increment _meta version: %w", err)
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return xerrors.Errorf("commit transaction: %w", err)
+	}
+	log.Infof("Successfully migrated event index from version 2 to version 3 in %s", time.Since(now))
+	return nil
+}
+
+// migrateToVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match
+// the query patterns of the event filter.
+//
+// First it drops indexes introduced in previous migrations:
+// 1. the index on the event.height and event.tipset_key columns
+// 2. the index on the event_entry.key column
+//
+// And then creating the following indices:
+// 1. an index on the event.tipset_key_cid column
+// 2. an index on the event.height column
+// 3. an index on the event.reverted column
+// 4. an index on the event_entry.indexed and event_entry.key columns
+// 5. an index on the event_entry.codec and event_entry.value columns
+// 6. an index on the event_entry.event_id column
+func (ei *EventIndex) migrateToVersion4(ctx context.Context) error {
+	now := time.Now()
+
+	tx, err := ei.db.BeginTx(ctx, nil)
+	if err != nil {
+		return xerrors.Errorf("begin transaction: %w", err)
+	}
+	defer func() { _ = tx.Rollback() }()
+
+	for _, create := range []struct {
+		desc  string
+		query string
+	}{
+		{"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"},
+		{"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"},
+		{"create index event_tipset_key_cid", createIndexEventTipsetKeyCid},
+		{"create index event_height", createIndexEventHeight},
+		{"create index event_reverted", createIndexEventReverted},
+		{"create index event_entry_indexed_key", createIndexEventEntryIndexedKey},
+		{"create index event_entry_codec_value", createIndexEventEntryCodecValue},
+		{"create index event_entry_event_id", createIndexEventEntryEventId},
+	} {
+		_, err = tx.ExecContext(ctx, create.query)
+		if err != nil {
+			return xerrors.Errorf("%s: %w", create.desc, err)
+		}
+	}
+
+	if _, err = tx.Exec("INSERT OR IGNORE INTO _meta (version) VALUES (4)"); err != nil {
+		return xerrors.Errorf("increment _meta version: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.Commit()
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ei.vacuumDBAndCheckpointWAL(ctx)
|
||||||
|
|
||||||
|
log.Infof("Successfully migrated event index from version 3 to version 4 in %s", time.Since(now))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
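
The v4 step replaces convenience indices with ones matched to the event filter's actual query patterns. When making this kind of change, SQLite's EXPLAIN QUERY PLAN is the quickest check that a target query really uses a new index; a fragment-level sketch (assumes `db *sql.DB` and `ctx` in scope; the query is illustrative, not the exact lotus statement):

rows, err := db.QueryContext(ctx, "EXPLAIN QUERY PLAN SELECT id FROM event WHERE height=? AND reverted=0", 123)
if err != nil {
	return err
}
defer rows.Close()
for rows.Next() {
	var id, parent, notused int
	var detail string
	if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
		return err
	}
	// hope to see e.g. "SEARCH event USING INDEX event_height (height=?)"
	fmt.Println(detail)
}
return rows.Err()
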
|
func (ei *EventIndex) vacuumDBAndCheckpointWAL(ctx context.Context) {
|
||||||
|
// During the large migrations, we have likely increased the WAL size a lot, so let's do some
|
||||||
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
|
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
|
||||||
// as this would be a good time to do it when no other writes are happening
|
// as this would be a good time to do it when no other writes are happening.
|
||||||
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
|
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
|
||||||
_, err = ei.db.Exec("VACUUM")
|
_, err := ei.db.ExecContext(ctx, "VACUUM")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("error vacuuming database: %s", err)
|
log.Warnf("error vacuuming database: %s", err)
|
||||||
}
|
}
|
||||||
_, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
|
_, err = ei.db.ExecContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("error checkpointing wal: %s", err)
|
log.Warnf("error checkpointing wal: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
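
One refinement worth knowing about the housekeeping above: `PRAGMA wal_checkpoint(TRUNCATE)` returns a result row, so a caller can distinguish a clean truncation from a checkpoint blocked by concurrent readers. A hedged fragment (assumes `db`, `ctx`, and a `log` in scope):

// wal_checkpoint reports (busy, wal_frames, checkpointed_frames);
// busy=1 means readers prevented the checkpoint from completing.
var busy, walFrames, checkpointed int
if err := db.QueryRowContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)").Scan(&busy, &walFrames, &checkpointed); err != nil {
	log.Warnf("error checkpointing wal: %s", err)
} else if busy != 0 {
	log.Warnf("wal checkpoint incomplete: %d of %d frames", checkpointed, walFrames)
}
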
func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) {
|
func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) {
|
||||||
@ -283,8 +401,8 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor
|
|||||||
|
|
||||||
eventIndex := EventIndex{db: db}
|
eventIndex := EventIndex{db: db}
|
||||||
|
|
||||||
q, err := db.Query("SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
|
q, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
|
||||||
if err == sql.ErrNoRows || !q.Next() {
|
if errors.Is(err, sql.ErrNoRows) || !q.Next() {
|
||||||
// empty database, create the schema
|
// empty database, create the schema
|
||||||
for _, ddl := range ddls {
|
for _, ddl := range ddls {
|
||||||
if _, err := db.Exec(ddl); err != nil {
|
if _, err := db.Exec(ddl); err != nil {
|
||||||
@ -305,40 +423,33 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor
|
|||||||
}
|
}
|
||||||
|
|
||||||
if version == 1 {
|
if version == 1 {
|
||||||
log.Infof("upgrading event index from version 1 to version 2")
|
log.Infof("Upgrading event index from version 1 to version 2")
|
||||||
|
|
||||||
err = eventIndex.migrateToVersion2(ctx, chainStore)
|
err = eventIndex.migrateToVersion2(ctx, chainStore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = db.Close()
|
_ = db.Close()
|
||||||
return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err)
|
return nil, xerrors.Errorf("could not migrate event index schema from version 1 to version 2: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// to upgrade to version version 2 we only need to create an index on the event table
|
|
||||||
// which means we can just recreate the schema (it will not have any effect on existing data)
|
|
||||||
for _, ddl := range ddls {
|
|
||||||
if _, err := db.Exec(ddl); err != nil {
|
|
||||||
_ = db.Close()
|
|
||||||
return nil, xerrors.Errorf("could not upgrade index to version 2, exec ddl %q: %w", ddl, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
version = 2
|
version = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
if version == 2 {
|
if version == 2 {
|
||||||
log.Infof("upgrading event index from version 2 to version 3")
|
log.Infof("Upgrading event index from version 2 to version 3")
|
||||||
|
err = eventIndex.migrateToVersion3(ctx)
|
||||||
// to upgrade to version 3 we only need to create an index on the event_entry.key column
|
if err != nil {
|
||||||
// and on the event.emitter_addr column
|
|
||||||
// which means we can just reapply the schema (it will not have any effect on existing data)
|
|
||||||
for _, ddl := range ddls {
|
|
||||||
if _, err := db.Exec(ddl); err != nil {
|
|
||||||
_ = db.Close()
|
_ = db.Close()
|
||||||
return nil, xerrors.Errorf("could not upgrade index to version 3, exec ddl %q: %w", ddl, err)
|
return nil, xerrors.Errorf("could not migrate event index schema from version 2 to version 3: %w", err)
|
||||||
}
|
}
|
||||||
|
version = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
version = 3
|
if version == 3 {
|
||||||
|
log.Infof("Upgrading event index from version 3 to version 4")
|
||||||
|
err = eventIndex.migrateToVersion4(ctx)
|
||||||
|
if err != nil {
|
||||||
|
_ = db.Close()
|
||||||
|
return nil, xerrors.Errorf("could not migrate event index schema from version 3 to version 4: %w", err)
|
||||||
|
}
|
||||||
|
version = 4
|
||||||
}
|
}
|
||||||
|
|
||||||
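
The chained `if version == N` blocks above walk a database forward one schema step at a time, so any historical version converges on the current one. The same dispatch can be written as a table, sketched here with a closure for the v2 step (which also needs the chain store); this is a hypothetical condensation, not the lotus code:

steps := map[int]func(context.Context) error{
	1: func(ctx context.Context) error { return eventIndex.migrateToVersion2(ctx, chainStore) },
	2: eventIndex.migrateToVersion3,
	3: eventIndex.migrateToVersion4,
}
for version != schemaVersion {
	step, ok := steps[version]
	if !ok {
		_ = db.Close()
		return nil, xerrors.Errorf("no migration from event index version %d", version)
	}
	if err := step(ctx); err != nil {
		_ = db.Close()
		return nil, xerrors.Errorf("migrating event index from version %d: %w", version, err)
	}
	version++
}
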
if version != schemaVersion {
|
if version != schemaVersion {
|
||||||
@ -369,9 +480,9 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
|
|||||||
return xerrors.Errorf("begin transaction: %w", err)
|
return xerrors.Errorf("begin transaction: %w", err)
|
||||||
}
|
}
|
||||||
// rollback the transaction (a no-op if the transaction was already committed)
|
// rollback the transaction (a no-op if the transaction was already committed)
|
||||||
defer tx.Rollback() //nolint:errcheck
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
|
||||||
// lets handle the revert case first, since its simpler and we can simply mark all events events in this tipset as reverted and return
|
// lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return
|
||||||
if revert {
|
if revert {
|
||||||
_, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
|
_, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -526,7 +637,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(f.addresses) > 0 {
|
if len(f.addresses) > 0 {
|
||||||
subclauses := []string{}
|
subclauses := make([]string, 0, len(f.addresses))
|
||||||
for _, addr := range f.addresses {
|
for _, addr := range f.addresses {
|
||||||
subclauses = append(subclauses, "emitter_addr=?")
|
subclauses = append(subclauses, "emitter_addr=?")
|
||||||
values = append(values, addr.Bytes())
|
values = append(values, addr.Bytes())
|
||||||
@ -543,7 +654,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude
|
|||||||
joins = append(joins, fmt.Sprintf("event_entry %s on event.id=%[1]s.event_id", joinAlias))
|
joins = append(joins, fmt.Sprintf("event_entry %s on event.id=%[1]s.event_id", joinAlias))
|
||||||
clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias))
|
clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias))
|
||||||
values = append(values, key)
|
values = append(values, key)
|
||||||
subclauses := []string{}
|
subclauses := make([]string, 0, len(vals))
|
||||||
for _, val := range vals {
|
for _, val := range vals {
|
||||||
subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias))
|
subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias))
|
||||||
values = append(values, val.Value, val.Codec)
|
values = append(values, val.Value, val.Codec)
|
||||||
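
Swapping `[]string{}` for `make([]string, 0, len(...))` just preallocates; the idiom worth noting is the shape of the clause building itself, where every candidate contributes a `?` placeholder so no value is ever interpolated into the SQL text. A minimal sketch of the pattern:

// one placeholder per candidate address, OR'd into a single clause
subclauses := make([]string, 0, len(addresses))
values := make([]any, 0, len(addresses))
for _, addr := range addresses {
	subclauses = append(subclauses, "emitter_addr=?")
	values = append(values, addr.Bytes())
}
clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")")
// later: db.QueryContext(ctx, "SELECT ... WHERE "+strings.Join(clauses, " AND "), values...)
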
|
@ -369,7 +369,7 @@ func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc {
|
|||||||
// DiffPaymentChannelStateFunc is function that compares two states for the payment channel
|
// DiffPaymentChannelStateFunc is function that compares two states for the payment channel
|
||||||
type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error)
|
type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error)
|
||||||
|
|
||||||
// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the the payment channel actor
|
// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the payment channel actor
|
||||||
func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc {
|
func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc {
|
||||||
return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
|
return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
|
||||||
oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
|
oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState)
|
||||||
|
@ -137,7 +137,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re
|
|||||||
|
|
||||||
chain, err := collectChainSegment(ctx, s.cs, req)
|
chain, err := collectChainSegment(ctx, s.cs, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn("block sync request: collectChainSegment failed: ", err)
|
log.Info("block sync request: collectChainSegment failed: ", err)
|
||||||
return &Response{
|
return &Response{
|
||||||
Status: InternalError,
|
Status: InternalError,
|
||||||
ErrorMessage: err.Error(),
|
ErrorMessage: err.Error(),
|
||||||
@ -171,17 +171,11 @@ func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validat
|
|||||||
}
|
}
|
||||||
|
|
||||||
if req.options.IncludeMessages {
|
if req.options.IncludeMessages {
|
||||||
bmsgs, bmincl, smsgs, smincl, err := gatherMessages(ctx, cs, ts)
|
bst.Messages, err = gatherMessages(ctx, cs, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("gather messages failed: %w", err)
|
return nil, xerrors.Errorf("gather messages failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: Pass the response to `gatherMessages()` and set all this there.
|
|
||||||
bst.Messages = &CompactedMessages{}
|
|
||||||
bst.Messages.Bls = bmsgs
|
|
||||||
bst.Messages.BlsIncludes = bmincl
|
|
||||||
bst.Messages.Secpk = smsgs
|
|
||||||
bst.Messages.SecpkIncludes = smincl
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bstips = append(bstips, &bst)
|
bstips = append(bstips, &bst)
|
||||||
@ -196,16 +190,16 @@ func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validat
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) {
|
func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (*CompactedMessages, error) {
|
||||||
|
msgs := new(CompactedMessages)
|
||||||
blsmsgmap := make(map[cid.Cid]uint64)
|
blsmsgmap := make(map[cid.Cid]uint64)
|
||||||
secpkmsgmap := make(map[cid.Cid]uint64)
|
secpkmsgmap := make(map[cid.Cid]uint64)
|
||||||
var secpkincl, blsincl [][]uint64
|
|
||||||
|
|
||||||
var blscids, secpkcids []cid.Cid
|
var blscids, secpkcids []cid.Cid
|
||||||
for _, block := range ts.Blocks() {
|
for _, block := range ts.Blocks() {
|
||||||
bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages)
|
bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: DRY. Use `chain.Message` interface.
|
// FIXME: DRY. Use `chain.Message` interface.
|
||||||
@ -220,7 +214,7 @@ func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet)
|
|||||||
|
|
||||||
bmi = append(bmi, i)
|
bmi = append(bmi, i)
|
||||||
}
|
}
|
||||||
blsincl = append(blsincl, bmi)
|
msgs.BlsIncludes = append(msgs.BlsIncludes, bmi)
|
||||||
|
|
||||||
smi := make([]uint64, 0, len(sc))
|
smi := make([]uint64, 0, len(sc))
|
||||||
for _, m := range sc {
|
for _, m := range sc {
|
||||||
@ -233,18 +227,19 @@ func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet)
|
|||||||
|
|
||||||
smi = append(smi, i)
|
smi = append(smi, i)
|
||||||
}
|
}
|
||||||
secpkincl = append(secpkincl, smi)
|
msgs.SecpkIncludes = append(msgs.SecpkIncludes, smi)
|
||||||
}
|
}
|
||||||
|
|
||||||
blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids)
|
var err error
|
||||||
|
msgs.Bls, err = cs.LoadMessagesFromCids(ctx, blscids)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids)
|
msgs.Secpk, err = cs.LoadSignedMessagesFromCids(ctx, secpkcids)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return blsmsgs, blsincl, secpkmsgs, secpkincl, nil
|
return msgs, nil
|
||||||
}
|
}
|
||||||
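
The refactor folds four parallel return values into one *CompactedMessages, which also makes the invariant easier to state: Bls/Secpk are deduplicated across the tipset, and BlsIncludes[i]/SecpkIncludes[i] hold the indexes into those slices for block i. A sketch of expanding the compacted form back out (the helper name is made up for illustration):

// blsForBlock reassembles block i's BLS messages from the compacted form
func blsForBlock(msgs *CompactedMessages, i int) []*types.Message {
	out := make([]*types.Message, 0, len(msgs.BlsIncludes[i]))
	for _, idx := range msgs.BlsIncludes[i] {
		out = append(out, msgs.Bls[idx])
	}
	return out
}
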
|
@ -5,6 +5,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
lru "github.com/hashicorp/golang-lru/v2"
|
||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
@ -16,6 +17,8 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||||
"github.com/filecoin-project/lotus/chain/store"
|
"github.com/filecoin-project/lotus/chain/store"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/lib/must"
|
||||||
|
"github.com/filecoin-project/lotus/lib/result"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -39,10 +42,19 @@ type Provider interface {
|
|||||||
IsLite() bool
|
IsLite() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type actorCacheKey struct {
|
||||||
|
types.TipSetKey
|
||||||
|
address.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
var nonceCacheSize = 128
|
||||||
|
|
||||||
type mpoolProvider struct {
|
type mpoolProvider struct {
|
||||||
sm *stmgr.StateManager
|
sm *stmgr.StateManager
|
||||||
ps *pubsub.PubSub
|
ps *pubsub.PubSub
|
||||||
|
|
||||||
|
liteActorCache *lru.Cache[actorCacheKey, result.Result[*types.Actor]]
|
||||||
|
|
||||||
lite MpoolNonceAPI
|
lite MpoolNonceAPI
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -53,18 +65,31 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
|
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
|
||||||
return &mpoolProvider{sm: sm, ps: ps, lite: noncer}
|
return &mpoolProvider{
|
||||||
|
sm: sm,
|
||||||
|
ps: ps,
|
||||||
|
lite: noncer,
|
||||||
|
liteActorCache: must.One(lru.New[actorCacheKey, result.Result[*types.Actor]](nonceCacheSize)),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mpp *mpoolProvider) IsLite() bool {
|
func (mpp *mpoolProvider) IsLite() bool {
|
||||||
return mpp.lite != nil
|
return mpp.lite != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (*types.Actor, error) {
|
func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (act *types.Actor, err error) {
|
||||||
if !mpp.IsLite() {
|
if !mpp.IsLite() {
|
||||||
return nil, errors.New("should not use getActorLite on non lite Provider")
|
return nil, errors.New("should not use getActorLite on non lite Provider")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c, ok := mpp.liteActorCache.Get(actorCacheKey{ts.Key(), addr}); ok {
|
||||||
|
return c.Unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
mpp.liteActorCache.Add(actorCacheKey{ts.Key(), addr}, result.Wrap(act, err))
|
||||||
|
}()
|
||||||
|
|
||||||
n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
|
n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, xerrors.Errorf("getting nonce over lite: %w", err)
|
return nil, xerrors.Errorf("getting nonce over lite: %w", err)
|
||||||
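
The cache stores a result (value or error) keyed by (tipset, address), so repeated failing lookups within one tipset are served from cache too; `must.One` and `result.Wrap` are the lotus helpers visible in the imports above. A freestanding sketch of the same pattern with golang-lru/v2 and a hand-rolled result type:

package main

import (
	"errors"
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// cachedResult memoizes either the value or the error of a fallible lookup
type cachedResult[V any] struct {
	val V
	err error
}

func expensiveLookup(key string) (int, error) {
	if key == "missing" {
		return 0, errors.New("not found")
	}
	return len(key), nil
}

func main() {
	cache, err := lru.New[string, cachedResult[int]](128)
	if err != nil {
		panic(err)
	}

	lookup := func(key string) (int, error) {
		if c, ok := cache.Get(key); ok {
			return c.val, c.err // errors are cached too
		}
		v, err := expensiveLookup(key)
		cache.Add(key, cachedResult[int]{v, err})
		return v, err
	}

	fmt.Println(lookup("a"))       // computed
	fmt.Println(lookup("a"))       // served from cache
	fmt.Println(lookup("missing")) // error, also cached on repeat
}
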
|
@ -400,7 +400,7 @@ tailLoop:
|
|||||||
continue tailLoop
|
continue tailLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
// the merge loop ended after processing all the chains and we we probably have still
|
// the merge loop ended after processing all the chains and we probably have still
|
||||||
// gas to spare; end the loop.
|
// gas to spare; end the loop.
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -1191,7 +1191,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
|
|||||||
func TestOptimalMessageSelection3(t *testing.T) {
|
func TestOptimalMessageSelection3(t *testing.T) {
|
||||||
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
|
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
|
||||||
|
|
||||||
// this test uses 10 actors sending a block of messages to each other, with the the first
|
// this test uses 10 actors sending a block of messages to each other, with the first
|
||||||
// actors paying higher gas premium than the subsequent actors.
|
// actors paying higher gas premium than the subsequent actors.
|
||||||
// We select with a low ticket quality; the chain dependent merging algorithm should pick
|
// We select with a low ticket quality; the chain dependent merging algorithm should pick
|
||||||
// messages from the median actor from the start
|
// messages from the median actor from the start
|
||||||
|
@ -1,98 +0,0 @@
|
|||||||
package messagesigner
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/ipfs/go-datastore"
|
|
||||||
"github.com/ipfs/go-datastore/namespace"
|
|
||||||
"github.com/libp2p/go-libp2p/core/peer"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
|
||||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
|
||||||
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
|
|
||||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
|
||||||
)
|
|
||||||
|
|
||||||
type MessageSignerConsensus struct {
|
|
||||||
MsgSigner
|
|
||||||
Consensus *consensus.Consensus
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewMessageSignerConsensus(
|
|
||||||
wallet api.Wallet,
|
|
||||||
mpool messagepool.MpoolNonceAPI,
|
|
||||||
ds dtypes.MetadataDS,
|
|
||||||
consensus *consensus.Consensus) *MessageSignerConsensus {
|
|
||||||
|
|
||||||
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
|
|
||||||
return &MessageSignerConsensus{
|
|
||||||
MsgSigner: &MessageSigner{
|
|
||||||
wallet: wallet,
|
|
||||||
mpool: mpool,
|
|
||||||
ds: ds,
|
|
||||||
},
|
|
||||||
Consensus: consensus,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
|
|
||||||
return ms.Consensus.IsLeader(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
|
|
||||||
ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
|
|
||||||
if err != nil {
|
|
||||||
return ok, err
|
|
||||||
}
|
|
||||||
return ok, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) SignMessage(
|
|
||||||
ctx context.Context,
|
|
||||||
msg *types.Message,
|
|
||||||
spec *api.MessageSendSpec,
|
|
||||||
cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
|
||||||
|
|
||||||
signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
op := &consensus.ConsensusOp{
|
|
||||||
Nonce: signedMsg.Message.Nonce,
|
|
||||||
Uuid: spec.MsgUuid,
|
|
||||||
Addr: signedMsg.Message.From,
|
|
||||||
SignedMsg: signedMsg,
|
|
||||||
}
|
|
||||||
err = ms.Consensus.Commit(ctx, op)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return signedMsg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
|
|
||||||
cstate, err := ms.Consensus.State(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
//cstate := state.(Consensus.RaftState)
|
|
||||||
msg, ok := cstate.MsgUuids[uuid]
|
|
||||||
if !ok {
|
|
||||||
return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
|
|
||||||
}
|
|
||||||
return msg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
|
|
||||||
return ms.Consensus.State(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
|
|
||||||
return ms.Consensus.Leader(ctx)
|
|
||||||
}
|
|
@ -230,7 +230,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
|
|||||||
Store: cst,
|
Store: cst,
|
||||||
snaps: newStateSnaps(),
|
snaps: newStateSnaps(),
|
||||||
}
|
}
|
||||||
s.lookupIDFun = s.lookupIDinternal
|
s.lookupIDFun = s.lookupInternalIDAddress
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -302,13 +302,13 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
|
|||||||
Store: cst,
|
Store: cst,
|
||||||
snaps: newStateSnaps(),
|
snaps: newStateSnaps(),
|
||||||
}
|
}
|
||||||
s.lookupIDFun = s.lookupIDinternal
|
s.lookupIDFun = s.lookupInternalIDAddress
|
||||||
|
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
|
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
|
||||||
iaddr, err := st.LookupID(addr)
|
iaddr, err := st.LookupIDAddress(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return xerrors.Errorf("ID lookup failed: %w", err)
|
return xerrors.Errorf("ID lookup failed: %w", err)
|
||||||
}
|
}
|
||||||
@ -318,7 +318,7 @@ func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (st *StateTree) lookupIDinternal(addr address.Address) (address.Address, error) {
|
func (st *StateTree) lookupInternalIDAddress(addr address.Address) (address.Address, error) {
|
||||||
act, err := st.GetActor(init_.Address)
|
act, err := st.GetActor(init_.Address)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return address.Undef, xerrors.Errorf("getting init actor: %w", err)
|
return address.Undef, xerrors.Errorf("getting init actor: %w", err)
|
||||||
@ -339,8 +339,8 @@ func (st *StateTree) lookupIDinternal(addr address.Address) (address.Address, er
|
|||||||
return a, err
|
return a, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`.
|
// LookupIDAddress gets the ID address of this actor's `addr` stored in the `InitActor`.
|
||||||
func (st *StateTree) LookupID(addr address.Address) (address.Address, error) {
|
func (st *StateTree) LookupIDAddress(addr address.Address) (address.Address, error) {
|
||||||
if addr.Protocol() == address.ID {
|
if addr.Protocol() == address.ID {
|
||||||
return addr, nil
|
return addr, nil
|
||||||
}
|
}
|
||||||
@ -366,7 +366,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Transform `addr` to its ID format.
|
// Transform `addr` to its ID format.
|
||||||
iaddr, err := st.LookupID(addr)
|
iaddr, err := st.LookupIDAddress(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if xerrors.Is(err, types.ErrActorNotFound) {
|
if xerrors.Is(err, types.ErrActorNotFound) {
|
||||||
return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
|
return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
|
||||||
@ -411,7 +411,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
|
|||||||
return xerrors.Errorf("DeleteActor called on undefined address")
|
return xerrors.Errorf("DeleteActor called on undefined address")
|
||||||
}
|
}
|
||||||
|
|
||||||
iaddr, err := st.LookupID(addr)
|
iaddr, err := st.LookupIDAddress(addr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if xerrors.Is(err, types.ErrActorNotFound) {
|
if xerrors.Is(err, types.ErrActorNotFound) {
|
||||||
return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
|
return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
|
||||||
|
@ -542,7 +542,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
|
|||||||
return api.MarketBalance{}, err
|
return api.MarketBalance{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
addr, err = sm.LookupID(ctx, addr, ts)
|
addr, err = sm.LookupIDAddress(ctx, addr, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return api.MarketBalance{}, err
|
return api.MarketBalance{}, err
|
||||||
}
|
}
|
||||||
|
@ -13,6 +13,16 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
|
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
|
||||||
|
return sm.tipSetState(ctx, ts, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recompute the tipset state without trying to look up a pre-computed result in the chainstore.
|
||||||
|
// Useful if we know that our local chain-state isn't complete (e.g., we've discarded the events).
|
||||||
|
func (sm *StateManager) RecomputeTipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
|
||||||
|
return sm.tipSetState(ctx, ts, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *StateManager) tipSetState(ctx context.Context, ts *types.TipSet, recompute bool) (st cid.Cid, rec cid.Cid, err error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "tipSetState")
|
ctx, span := trace.StartSpan(ctx, "tipSetState")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
if span.IsRecordingEvents() {
|
if span.IsRecordingEvents() {
|
||||||
@ -65,9 +75,11 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
|
|||||||
|
|
||||||
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
|
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
|
||||||
// it.
|
// it.
|
||||||
|
if !recompute {
|
||||||
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
|
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
|
||||||
return st, rec, nil
|
return st, rec, nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
|
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -178,26 +178,31 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
|
|||||||
retCid := root
|
retCid := root
|
||||||
u := sm.stateMigrations[height]
|
u := sm.stateMigrations[height]
|
||||||
if u != nil && u.upgrade != nil {
|
if u != nil && u.upgrade != nil {
|
||||||
if height != build.UpgradeWatermelonFixHeight {
|
|
||||||
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
|
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if ok {
|
if ok {
|
||||||
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
|
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
|
||||||
|
foundMigratedRoot, err := sm.ChainStore().StateBlockstore().Has(ctx, migCid)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorw("failed to check whether previous migration result is present", "err", err)
|
||||||
|
} else if !foundMigratedRoot {
|
||||||
|
log.Errorw("cached migration result not found in blockstore, running migration again")
|
||||||
|
u.migrationResultCache.Delete(ctx, root)
|
||||||
|
} else {
|
||||||
return migCid, nil
|
return migCid, nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
} else if !errors.Is(err, datastore.ErrNotFound) {
|
} else if !errors.Is(err, datastore.ErrNotFound) {
|
||||||
log.Errorw("failed to lookup previous migration result", "err", err)
|
log.Errorw("failed to lookup previous migration result", "err", err)
|
||||||
} else {
|
} else {
|
||||||
log.Debug("no cached migration found, migrating from scratch")
|
log.Debug("no cached migration found, migrating from scratch")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
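
The new branch fixes a subtle failure mode: a cached migration result can point at a state root that has since been pruned from the blockstore, so a hit is only honored after confirming the data is still present, and the stale entry is deleted otherwise. The general pattern, sketched with placeholder cache/store names:

// cache-then-verify: never trust a cache entry whose referent may have
// been garbage-collected out from under it
if cached, ok, err := cache.Get(ctx, root); err == nil && ok {
	present, err := store.Has(ctx, cached)
	switch {
	case err != nil:
		log.Errorw("failed to check cached migration result", "err", err)
	case !present:
		cache.Delete(ctx, root) // stale: recompute below
	default:
		return cached, nil
	}
}
// fall through to run the migration and re-populate the cache
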
startTime := time.Now()
|
startTime := time.Now()
|
||||||
log.Warnw("STARTING migration", "height", height, "from", root)
|
log.Warnw("STARTING migration", "height", height, "from", root)
|
||||||
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
|
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
|
||||||
// have to migrate multiple times.
|
// have to migrate multiple times.
|
||||||
tmpCache := u.cache.Clone()
|
tmpCache := u.cache.Clone()
|
||||||
var err error
|
|
||||||
retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
|
retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorw("FAILED migration", "height", height, "from", root, "error", err)
|
log.Errorw("FAILED migration", "height", height, "from", root, "error", err)
|
||||||
|
@ -375,6 +375,20 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestForkPreMigration(t *testing.T) {
|
func TestForkPreMigration(t *testing.T) {
|
||||||
|
// Backup the original value of the DISABLE_PRE_MIGRATIONS environment variable
|
||||||
|
originalValue, _ := os.LookupEnv("LOTUS_DISABLE_PRE_MIGRATIONS")
|
||||||
|
|
||||||
|
// Unset the DISABLE_PRE_MIGRATIONS environment variable for the test
|
||||||
|
if err := os.Unsetenv("LOTUS_DISABLE_PRE_MIGRATIONS"); err != nil {
|
||||||
|
t.Fatalf("failed to unset LOTUS_DISABLE_PRE_MIGRATIONS: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore the original DISABLE_PRE_MIGRATIONS environment variable at the end of the test
|
||||||
|
defer func() {
|
||||||
|
if err := os.Setenv("LOTUS_DISABLE_PRE_MIGRATIONS", originalValue); err != nil {
|
||||||
|
t.Fatalf("failed to restore LOTUS_DISABLE_PRE_MIGRATIONS: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
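
Since Go 1.17 the testing package offers t.Setenv, which restores the previous value automatically, but it can only set, not unset. Note also that the restore above calls os.Setenv with an empty string when the variable was originally unset, leaving it empty-but-set; a LookupEnv-based helper avoids both issues. A sketch, assuming the usual `os` and `testing` imports in the test file:

// unsetenvForTest unsets an env var for one test and restores the prior
// state via t.Cleanup, including the originally-unset case.
func unsetenvForTest(t *testing.T, key string) {
	t.Helper()
	prev, wasSet := os.LookupEnv(key)
	if err := os.Unsetenv(key); err != nil {
		t.Fatalf("failed to unset %s: %v", key, err)
	}
	t.Cleanup(func() {
		if wasSet {
			_ = os.Setenv(key, prev)
		}
	})
}
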
//stm: @CHAIN_GEN_NEXT_TIPSET_001,
|
//stm: @CHAIN_GEN_NEXT_TIPSET_001,
|
||||||
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
|
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
|
||||||
logging.SetAllLoggers(logging.LevelInfo)
|
logging.SetAllLoggers(logging.LevelInfo)
|
||||||
|
@ -44,7 +44,7 @@ func (s *RPCStateManager) LoadActorTsk(ctx context.Context, addr address.Address
|
|||||||
return s.gapi.StateGetActor(ctx, addr, tsk)
|
return s.gapi.StateGetActor(ctx, addr, tsk)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *RPCStateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
func (s *RPCStateManager) LookupIDAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
||||||
return s.gapi.StateLookupID(ctx, addr, ts.Key())
|
return s.gapi.StateLookupID(ctx, addr, ts.Key())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -243,7 +243,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
|
|||||||
return nil, nil, cid.Undef, xerrors.Errorf("failed to load initial tipset")
|
return nil, nil, cid.Undef, xerrors.Errorf("failed to load initial tipset")
|
||||||
}
|
}
|
||||||
|
|
||||||
mFromId, err := sm.LookupID(ctx, m.VMMessage().From, from)
|
mFromId, err := sm.LookupIDAddress(ctx, m.VMMessage().From, from)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, cid.Undef, xerrors.Errorf("looking up From id address: %w", err)
|
return nil, nil, cid.Undef, xerrors.Errorf("looking up From id address: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -49,7 +49,7 @@ type StateManagerAPI interface {
|
|||||||
Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error)
|
Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error)
|
||||||
GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error)
|
GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error)
|
||||||
LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error)
|
LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error)
|
||||||
LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
|
LookupIDAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
|
||||||
ResolveToDeterministicAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
|
ResolveToDeterministicAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -113,6 +113,10 @@ func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCi
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *migrationResultCache) Delete(ctx context.Context, root cid.Cid) {
|
||||||
|
_ = m.ds.Delete(ctx, m.keyForMigration(root))
|
||||||
|
}
|
||||||
|
|
||||||
type Executor interface {
|
type Executor interface {
|
||||||
NewActorRegistry() *vm.ActorRegistry
|
NewActorRegistry() *vm.ActorRegistry
|
||||||
ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor, vmTracing bool) (stateroot cid.Cid, rectsroot cid.Cid, err error)
|
ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor, vmTracing bool) (stateroot cid.Cid, rectsroot cid.Cid, err error)
|
||||||
@ -396,13 +400,30 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres
|
|||||||
return kaddr.Payload(), nil
|
return kaddr.Payload(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
func (sm *StateManager) LookupIDAddress(_ context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
|
||||||
|
// Check for the fast route first to avoid unnecessary CBOR store instantiation and state tree load.
|
||||||
|
if addr.Protocol() == address.ID {
|
||||||
|
return addr, nil
|
||||||
|
}
|
||||||
|
|
||||||
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
|
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
|
||||||
state, err := state.LoadStateTree(cst, sm.parentState(ts))
|
state, err := state.LoadStateTree(cst, sm.parentState(ts))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return address.Undef, xerrors.Errorf("load state tree: %w", err)
|
return address.Undef, xerrors.Errorf("load state tree: %w", err)
|
||||||
}
|
}
|
||||||
return state.LookupID(addr)
|
return state.LookupIDAddress(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (abi.ActorID, error) {
|
||||||
|
idAddr, err := sm.LookupIDAddress(ctx, addr, ts)
|
||||||
|
if err != nil {
|
||||||
|
return 0, xerrors.Errorf("state manager lookup id: %w", err)
|
||||||
|
}
|
||||||
|
id, err := address.IDFromAddress(idAddr)
|
||||||
|
if err != nil {
|
||||||
|
return 0, xerrors.Errorf("resolve actor id: id from addr: %w", err)
|
||||||
|
}
|
||||||
|
return abi.ActorID(id), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *StateManager) LookupRobustAddress(ctx context.Context, idAddr address.Address, ts *types.TipSet) (address.Address, error) {
|
func (sm *StateManager) LookupRobustAddress(ctx context.Context, idAddr address.Address, ts *types.TipSet) (address.Address, error) {
|
||||||
|
@ -193,7 +193,7 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetVestedFunds returns all funds that have "left" actors that are in the genesis state:
|
// GetFilVested returns all funds that have "left" actors that are in the genesis state:
|
||||||
// - For Multisigs, it counts the actual amounts that have vested at the given epoch
|
// - For Multisigs, it counts the actual amounts that have vested at the given epoch
|
||||||
// - For Accounts, it counts max(currentBalance - genesisBalance, 0).
|
// - For Accounts, it counts max(currentBalance - genesisBalance, 0).
|
||||||
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
|
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||||
|
@ -44,23 +44,15 @@ func TestChainCheckpoint(t *testing.T) {
|
|||||||
head := cs.GetHeaviestTipSet()
|
head := cs.GetHeaviestTipSet()
|
||||||
require.True(t, head.Equals(checkpointParents))
|
require.True(t, head.Equals(checkpointParents))
|
||||||
|
|
||||||
// Try to set the checkpoint in the future, it should fail.
|
// Checkpoint into the future.
|
||||||
err = cs.SetCheckpoint(ctx, checkpoint)
|
err = cs.SetCheckpoint(ctx, checkpoint)
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
// Then move the head back.
|
|
||||||
err = cs.SetHead(ctx, checkpoint)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Verify it worked.
|
// And verify that it worked.
|
||||||
head = cs.GetHeaviestTipSet()
|
head = cs.GetHeaviestTipSet()
|
||||||
require.True(t, head.Equals(checkpoint))
|
require.True(t, head.Equals(checkpoint))
|
||||||
|
|
||||||
// And checkpoint it.
|
// Let the second miner mine a fork
|
||||||
err = cs.SetCheckpoint(ctx, checkpoint)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Let the second miner miner mine a fork
|
|
||||||
last = checkpointParents
|
last = checkpointParents
|
||||||
for i := 0; i < 4; i++ {
|
for i := 0; i < 4; i++ {
|
||||||
ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
|
ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
|
||||||
@ -85,11 +77,10 @@ func TestChainCheckpoint(t *testing.T) {
|
|||||||
head = cs.GetHeaviestTipSet()
|
head = cs.GetHeaviestTipSet()
|
||||||
require.True(t, head.Equals(last))
|
require.True(t, head.Equals(last))
|
||||||
|
|
||||||
// Setting a checkpoint on the other fork should fail.
|
// We should switch back if we checkpoint again.
|
||||||
err = cs.SetCheckpoint(ctx, checkpoint)
|
err = cs.SetCheckpoint(ctx, checkpoint)
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
// Setting a checkpoint on this fork should succeed.
|
|
||||||
err = cs.SetCheckpoint(ctx, checkpointParents)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
head = cs.GetHeaviestTipSet()
|
||||||
|
require.True(t, head.Equals(checkpoint))
|
||||||
}
|
}
|
||||||
|
@ -119,7 +119,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
|
|||||||
var sender address.Address
|
var sender address.Address
|
||||||
if ts.Height() >= build.UpgradeHyperdriveHeight {
|
if ts.Height() >= build.UpgradeHyperdriveHeight {
|
||||||
if useIds {
|
if useIds {
|
||||||
sender, err = st.LookupID(m.From)
|
sender, err = st.LookupIDAddress(m.From)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
||||||
}
|
}
|
||||||
@ -131,14 +131,14 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
|
|||||||
// uh-oh, we actually have an ID-sender!
|
// uh-oh, we actually have an ID-sender!
|
||||||
useIds = true
|
useIds = true
|
||||||
for robust, nonce := range applied {
|
for robust, nonce := range applied {
|
||||||
resolved, err := st.LookupID(robust)
|
resolved, err := st.LookupIDAddress(robust)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
||||||
}
|
}
|
||||||
applied[resolved] = nonce
|
applied[resolved] = nonce
|
||||||
}
|
}
|
||||||
|
|
||||||
sender, err = st.LookupID(m.From)
|
sender, err = st.LookupIDAddress(m.From)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
return false, xerrors.Errorf("failed to resolve sender: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -793,9 +793,12 @@ func (cs *ChainStore) removeCheckpoint(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
|
// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. If the new
|
||||||
|
// checkpoint is not an ancestor of the current head, head will be set to the new checkpoint.
|
||||||
//
|
//
|
||||||
// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
|
// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past, but can be set
|
||||||
|
// arbitrarily far into the future.
|
||||||
|
// NOTE: The new checkpoint must already be synced.
|
||||||
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
|
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
|
||||||
tskBytes, err := json.Marshal(ts.Key())
|
tskBytes, err := json.Marshal(ts.Key())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -805,10 +808,6 @@ func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error
|
|||||||
cs.heaviestLk.Lock()
|
cs.heaviestLk.Lock()
|
||||||
defer cs.heaviestLk.Unlock()
|
defer cs.heaviestLk.Unlock()
|
||||||
|
|
||||||
if ts.Height() > cs.heaviest.Height() {
|
|
||||||
return xerrors.Errorf("cannot set a checkpoint in the future")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, this operation could get _very_ expensive.
|
// Otherwise, this operation could get _very_ expensive.
|
||||||
if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
|
if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
|
||||||
return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
|
return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
|
||||||
@ -821,7 +820,9 @@ func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !anc {
|
if !anc {
|
||||||
return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
|
if err := cs.takeHeaviestTipSet(ctx, ts); err != nil {
|
||||||
|
return xerrors.Errorf("failed to switch chains when setting checkpoint: %w", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
|
err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
|
||||||
|
@ -24,7 +24,7 @@ func NewWindow(capacity int, size time.Duration) *Window {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add attempts to append a new timestamp into the current window. Previously
|
// Add attempts to append a new timestamp into the current window. Previously
|
||||||
// added values that are not not within `size` difference from the value being
|
// added values that are not within `size` difference from the value being
|
||||||
// added are first removed. Add fails if adding the value would cause the
|
// added are first removed. Add fails if adding the value would cause the
|
||||||
// window to exceed capacity.
|
// window to exceed capacity.
|
||||||
func (w *Window) Add() error {
|
func (w *Window) Add() error {
|
||||||
|
@ -357,7 +357,7 @@ func (sm *syncManager) selectInitialSyncTarget() (*types.TipSet, error) {
|
|||||||
return buckets.Heaviest(), nil
|
return buckets.Heaviest(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// adds a tipset to the potential sync targets; returns true if there is a a tipset to work on.
|
// adds a tipset to the potential sync targets; returns true if there is a tipset to work on.
|
||||||
// this could be either a restart, eg because there is no currently scheduled sync work or a worker
|
// this could be either a restart, eg because there is no currently scheduled sync work or a worker
|
||||||
// failed or a potential fork.
|
// failed or a potential fork.
|
||||||
func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) {
|
func (sm *syncManager) addSyncTarget(ts *types.TipSet) (*types.TipSet, bool, error) {
|
||||||
|
@ -1204,7 +1204,7 @@ func TestSyncManualBadTS(t *testing.T) {
|
|||||||
tu.compareSourceState(client)
|
tu.compareSourceState(client)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestState tests fetching the sync worker state before, during & after the sync
|
// TestSyncState tests fetching the sync worker state before, during & after the sync
|
||||||
func TestSyncState(t *testing.T) {
|
func TestSyncState(t *testing.T) {
|
||||||
H := 50
|
H := 50
|
||||||
tu := prepSyncTest(t, H)
|
tu := prepSyncTest(t, H)
|
||||||
|
@ -197,7 +197,7 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEthBlock(hasTransactions bool) EthBlock {
|
func NewEthBlock(hasTransactions bool, tipsetLen int) EthBlock {
|
||||||
b := EthBlock{
|
b := EthBlock{
|
||||||
Sha3Uncles: EmptyUncleHash, // Sha3Uncles set to a hardcoded value which is used by some clients to determine if has no uncles.
|
Sha3Uncles: EmptyUncleHash, // Sha3Uncles set to a hardcoded value which is used by some clients to determine if has no uncles.
|
||||||
StateRoot: EmptyEthHash,
|
StateRoot: EmptyEthHash,
|
||||||
@ -208,7 +208,7 @@ func NewEthBlock(hasTransactions bool) EthBlock {
|
|||||||
Extradata: []byte{},
|
Extradata: []byte{},
|
||||||
MixHash: EmptyEthHash,
|
MixHash: EmptyEthHash,
|
||||||
Nonce: EmptyEthNonce,
|
Nonce: EmptyEthNonce,
|
||||||
GasLimit: EthUint64(build.BlockGasLimit), // TODO we map Ethereum blocks to Filecoin tipsets; this is inconsistent.
|
GasLimit: EthUint64(build.BlockGasLimit * int64(tipsetLen)),
|
||||||
Uncles: []EthHash{},
|
Uncles: []EthHash{},
|
||||||
Transactions: []interface{}{},
|
Transactions: []interface{}{},
|
||||||
}
|
}
|
||||||
|
@ -6,6 +6,8 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/invopop/jsonschema"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -77,6 +79,10 @@ func (f FIL) MarshalText() (text []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f FIL) UnmarshalText(text []byte) error {
|
func (f FIL) UnmarshalText(text []byte) error {
|
||||||
|
if f.Int == nil {
|
||||||
|
return fmt.Errorf("cannot unmarshal into nil BigInt (text:%s)", string(text))
|
||||||
|
}
|
||||||
|
|
||||||
p, err := ParseFIL(string(text))
|
p, err := ParseFIL(string(text))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -134,5 +140,12 @@ func MustParseFIL(s string) FIL {
|
|||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f FIL) JSONSchema() *jsonschema.Schema {
|
||||||
|
return &jsonschema.Schema{
|
||||||
|
Type: "string",
|
||||||
|
Pattern: `^((\d+(\.\d+)?|0x[0-9a-fA-F]+))( ([aA]([tT][tT][oO])?)?[fF][iI][lL])?$`,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var _ encoding.TextMarshaler = (*FIL)(nil)
|
var _ encoding.TextMarshaler = (*FIL)(nil)
|
||||||
var _ encoding.TextUnmarshaler = (*FIL)(nil)
|
var _ encoding.TextUnmarshaler = (*FIL)(nil)
|
||||||
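
The schema advertises FIL amounts as strings; the pattern accepts a decimal or 0x-hex quantity with an optional "fil"/"attofil" suffix in any letter case. A quick standalone check of the pattern (sample strings are arbitrary):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// same pattern as the JSONSchema above
	p := regexp.MustCompile(`^((\d+(\.\d+)?|0x[0-9a-fA-F]+))( ([aA]([tT][tT][oO])?)?[fF][iI][lL])?$`)
	for _, s := range []string{"1", "1.5 FIL", "0xde0b6b3a7640000", "3 attoFIL", "10 afil", "fil"} {
		fmt.Printf("%-20q %v\n", s, p.MatchString(s)) // all true except "fil"
	}
}
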
|
@ -18,10 +18,10 @@ import (
|
|||||||
func TestTipSetKey(t *testing.T) {
|
func TestTipSetKey(t *testing.T) {
|
||||||
//stm: @TYPES_TIPSETKEY_FROM_BYTES_001, @TYPES_TIPSETKEY_NEW_001
|
//stm: @TYPES_TIPSETKEY_FROM_BYTES_001, @TYPES_TIPSETKEY_NEW_001
|
||||||
cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}
|
cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}
|
||||||
|
// distinct but arbitrary CIDs; they claim dag-cbor encoding but are really just multihashes over raw bytes
|
||||||
c1, _ := cb.Sum([]byte("a"))
|
c1, _ := cb.Sum([]byte("a"))
|
||||||
c2, _ := cb.Sum([]byte("b"))
|
c2, _ := cb.Sum([]byte("b"))
|
||||||
c3, _ := cb.Sum([]byte("c"))
|
c3, _ := cb.Sum([]byte("c"))
|
||||||
fmt.Println(len(c1.Bytes()))
|
|
||||||
|
|
||||||
t.Run("zero value", func(t *testing.T) {
|
t.Run("zero value", func(t *testing.T) {
|
||||||
assert.Equal(t, EmptyTSK, NewTipSetKey())
|
assert.Equal(t, EmptyTSK, NewTipSetKey())
|
||||||
@ -36,6 +36,22 @@ func TestTipSetKey(t *testing.T) {
|
|||||||
assert.Equal(t, []cid.Cid{c1, c1}, NewTipSetKey(c1, c1).Cids())
|
assert.Equal(t, []cid.Cid{c1, c1}, NewTipSetKey(c1, c1).Cids())
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("derived CID", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "bafy2bzacecesrkxghscnq7vatble2hqdvwat6ed23vdu4vvo3uuggsoaya7ki", c1.String()) // sanity check
|
||||||
|
actualCid, err := NewTipSetKey().Cid()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "bafy2bzacea456askyutsf7uk4ta2q5aojrlcji4mhaqokbfalgvoq4ueeh4l2", actualCid.String(), "empty TSK")
|
||||||
|
actualCid, err = NewTipSetKey(c1).Cid()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "bafy2bzacealem6larzxhf7aggj3cozcefqez3jlksx2tuxehwdil27otcmy4q", actualCid.String())
|
||||||
|
actualCid, err = NewTipSetKey(c1, c2, c3).Cid()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "bafy2bzacecbnwngwfvxuciumcfudiaoqozisp3hus5im5lg4urrwlxbueissu", actualCid.String())
|
||||||
|
|
||||||
|
// The key doesn't check for duplicates.
|
||||||
|
assert.Equal(t, []cid.Cid{c1, c1}, NewTipSetKey(c1, c1).Cids())
|
||||||
|
})
|
||||||
|
|
||||||
t.Run("equality", func(t *testing.T) {
|
t.Run("equality", func(t *testing.T) {
|
||||||
assert.Equal(t, NewTipSetKey(), NewTipSetKey())
|
assert.Equal(t, NewTipSetKey(), NewTipSetKey())
|
||||||
assert.Equal(t, NewTipSetKey(c1), NewTipSetKey(c1))
|
assert.Equal(t, NewTipSetKey(c1), NewTipSetKey(c1))
|
||||||
|
@ -41,14 +41,14 @@ func newVMExecutor(vmi Interface, lane ExecutionLane) Interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *vmExecutor) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
|
func (e *vmExecutor) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
|
||||||
token := execution.getToken(e.lane)
|
token := execution.getToken(ctx, e.lane)
|
||||||
defer token.Done()
|
defer token.Done()
|
||||||
|
|
||||||
return e.vmi.ApplyMessage(ctx, cmsg)
|
return e.vmi.ApplyMessage(ctx, cmsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *vmExecutor) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
|
func (e *vmExecutor) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
|
||||||
token := execution.getToken(e.lane)
|
token := execution.getToken(ctx, e.lane)
|
||||||
defer token.Done()
|
defer token.Done()
|
||||||
|
|
||||||
return e.vmi.ApplyImplicitMessage(ctx, msg)
|
return e.vmi.ApplyImplicitMessage(ctx, msg)
|
||||||
@ -61,6 +61,7 @@ func (e *vmExecutor) Flush(ctx context.Context) (cid.Cid, error) {
|
|||||||
type executionToken struct {
|
type executionToken struct {
|
||||||
lane ExecutionLane
|
lane ExecutionLane
|
||||||
reserved int
|
reserved int
|
||||||
|
ctx context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
func (token *executionToken) Done() {
|
func (token *executionToken) Done() {
|
||||||
@@ -77,78 +78,69 @@ type executionEnv struct {
 	reserved int
 }
 
-func (e *executionEnv) getToken(lane ExecutionLane) *executionToken {
-	metricsUp(metrics.VMExecutionWaiting, lane)
-	defer metricsDown(metrics.VMExecutionWaiting, lane)
+func (e *executionEnv) getToken(ctx context.Context, lane ExecutionLane) *executionToken {
+	metricsUp(ctx, metrics.VMExecutionWaiting, lane)
+	defer metricsDown(ctx, metrics.VMExecutionWaiting, lane)
 
 	e.mx.Lock()
-	defer e.mx.Unlock()
 
-	switch lane {
-	case ExecutionLaneDefault:
+	reserving := 0
+	if lane == ExecutionLaneDefault {
 		for e.available <= e.reserved {
 			e.cond.Wait()
 		}
-
-		e.available--
-
-		metricsUp(metrics.VMExecutionRunning, lane)
-		return &executionToken{lane: lane, reserved: 0}
-
-	case ExecutionLanePriority:
+	} else {
 		for e.available == 0 {
 			e.cond.Wait()
 		}
-
-		e.available--
-
-		reserving := 0
 		if e.reserved > 0 {
 			e.reserved--
 			reserving = 1
 		}
-
-		metricsUp(metrics.VMExecutionRunning, lane)
-		return &executionToken{lane: lane, reserved: reserving}
-
-	default:
-		// already checked at interface boundary in NewVM, so this is appropriate
-		panic("bogus execution lane")
 	}
+
+	e.available--
+	e.mx.Unlock()
+
+	metricsUp(ctx, metrics.VMExecutionRunning, lane)
+	return &executionToken{lane: lane, reserved: reserving, ctx: ctx}
 }
 
 func (e *executionEnv) putToken(token *executionToken) {
 	e.mx.Lock()
-	defer e.mx.Unlock()
 
 	e.available++
 	e.reserved += token.reserved
 
 	// Note: Signal is unsound, because a priority token could wake up a non-priority
-	// goroutnie and lead to deadlock. So Broadcast it must be.
+	// goroutine and lead to deadlock. So Broadcast it must be.
 	e.cond.Broadcast()
+	e.mx.Unlock()
 
-	metricsDown(metrics.VMExecutionRunning, token.lane)
+	metricsDown(token.ctx, metrics.VMExecutionRunning, token.lane)
 }
 
-func metricsUp(metric *stats.Int64Measure, lane ExecutionLane) {
-	metricsAdjust(metric, lane, 1)
+func metricsUp(ctx context.Context, metric *stats.Int64Measure, lane ExecutionLane) {
+	metricsAdjust(ctx, metric, lane, 1)
 }
 
-func metricsDown(metric *stats.Int64Measure, lane ExecutionLane) {
-	metricsAdjust(metric, lane, -1)
+func metricsDown(ctx context.Context, metric *stats.Int64Measure, lane ExecutionLane) {
+	metricsAdjust(ctx, metric, lane, -1)
 }
 
-func metricsAdjust(metric *stats.Int64Measure, lane ExecutionLane, delta int) {
-	laneName := "default"
+var (
+	defaultLaneTag  = tag.Upsert(metrics.ExecutionLane, "default")
+	priorityLaneTag = tag.Upsert(metrics.ExecutionLane, "priority")
+)
+
+func metricsAdjust(ctx context.Context, metric *stats.Int64Measure, lane ExecutionLane, delta int) {
+	laneTag := defaultLaneTag
 	if lane > ExecutionLaneDefault {
-		laneName = "priority"
+		laneTag = priorityLaneTag
 	}
 
-	ctx, _ := tag.New(
-		context.Background(),
-		tag.Upsert(metrics.ExecutionLane, laneName),
-	)
+	ctx, _ = tag.New(ctx, laneTag)
 	stats.Record(ctx, metric.M(int64(delta)))
 }
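For readers unfamiliar with the OpenCensus pattern the hunks above settle on, here is a minimal self-contained sketch. The measure and tag-key names below are invented for illustration; the real code uses metrics.VMExecutionWaiting/VMExecutionRunning and metrics.ExecutionLane.

	package main

	import (
		"context"

		"go.opencensus.io/stats"
		"go.opencensus.io/tag"
	)

	var (
		execRunning = stats.Int64("vm/exec_running", "executions in flight", stats.UnitDimensionless)
		laneKey     = tag.MustNewKey("lane")
	)

	// record tags the caller's ctx instead of context.Background(), so any tags
	// the caller already attached survive; a precomputed tag.Upsert mutator
	// (defaultLaneTag/priorityLaneTag in the diff) avoids rebuilding it per call.
	func record(ctx context.Context, delta int64) {
		ctx, _ = tag.New(ctx, tag.Upsert(laneKey, "priority"))
		stats.Record(ctx, execRunning.M(delta))
	}

	func main() {
		record(context.Background(), 1)
	}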
@@ -111,7 +111,7 @@ func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount {
 }
 
 func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) {
-	r, err := rt.state.LookupID(addr)
+	r, err := rt.state.LookupIDAddress(addr)
 	if err != nil {
 		if xerrors.Is(err, types.ErrActorNotFound) {
 			return address.Undef, false
@@ -902,7 +902,7 @@ func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, network
 		return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt)
 	}
 
-	fromID, err = vm.cstate.LookupID(from)
+	fromID, err = vm.cstate.LookupIDAddress(from)
 	if err != nil {
 		return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
 	}
@@ -921,7 +921,7 @@ func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, network
 		return nil
 	}
 
-	toID, err = vm.cstate.LookupID(to)
+	toID, err = vm.cstate.LookupIDAddress(to)
 	if err != nil {
 		return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
 	}
@@ -935,12 +935,12 @@ func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, network
 		return nil
 	}
 
-	fromID, err = vm.cstate.LookupID(from)
+	fromID, err = vm.cstate.LookupIDAddress(from)
 	if err != nil {
 		return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
 	}
 
-	toID, err = vm.cstate.LookupID(to)
+	toID, err = vm.cstate.LookupIDAddress(to)
 	if err != nil {
 		return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
 	}
@@ -10,7 +10,7 @@ import (
 	"github.com/ipfs/go-datastore"
 	"github.com/ipfs/go-datastore/query"
 	logging "github.com/ipfs/go-log/v2"
-	ledgerfil "github.com/whyrusleeping/ledger-filecoin-go"
+	ledgerfil "github.com/zondax/ledger-filecoin-go"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
cli/clicommands/cmd.go (new file, +30)
@@ -0,0 +1,30 @@
+package clicommands
+
+import (
+	"github.com/urfave/cli/v2"
+
+	lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var Commands = []*cli.Command{
+	lcli.WithCategory("basic", lcli.SendCmd),
+	lcli.WithCategory("basic", lcli.WalletCmd),
+	lcli.WithCategory("basic", lcli.InfoCmd),
+	lcli.WithCategory("basic", lcli.ClientCmd),
+	lcli.WithCategory("basic", lcli.MultisigCmd),
+	lcli.WithCategory("basic", lcli.FilplusCmd),
+	lcli.WithCategory("basic", lcli.PaychCmd),
+	lcli.WithCategory("developer", lcli.AuthCmd),
+	lcli.WithCategory("developer", lcli.MpoolCmd),
+	lcli.WithCategory("developer", StateCmd),
+	lcli.WithCategory("developer", lcli.ChainCmd),
+	lcli.WithCategory("developer", lcli.LogCmd),
+	lcli.WithCategory("developer", lcli.WaitApiCmd),
+	lcli.WithCategory("developer", lcli.FetchParamCmd),
+	lcli.WithCategory("developer", lcli.EvmCmd),
+	lcli.WithCategory("network", lcli.NetCmd),
+	lcli.WithCategory("network", lcli.SyncCmd),
+	lcli.WithCategory("status", lcli.StatusCmd),
+	lcli.PprofCmd,
+	lcli.VersionCmd,
+}
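A hedged sketch of how a binary might consume the new package (the app wiring below is illustrative only, not taken from this diff):

	package main

	import (
		"log"
		"os"

		"github.com/urfave/cli/v2"

		"github.com/filecoin-project/lotus/cli/clicommands"
	)

	func main() {
		app := &cli.App{
			Name:     "lotus",
			Commands: clicommands.Commands,
		}
		if err := app.Run(os.Args); err != nil {
			log.Fatal(err)
		}
	}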
cli/clicommands/state.go (new file, +70)
@@ -0,0 +1,70 @@
+// Package clicommands contains only the cli.Command definitions that are
+// common to sptool and miner. These are here because they can't be referenced
+// in cli/spcli or cli/ because of the import cycle with all the other cli functions.
+package clicommands
+
+import (
+	"github.com/urfave/cli/v2"
+
+	"github.com/filecoin-project/go-address"
+
+	lcli "github.com/filecoin-project/lotus/cli"
+	"github.com/filecoin-project/lotus/cli/spcli"
+)
+
+var StateCmd = &cli.Command{
+	Name:  "state",
+	Usage: "Interact with and query filecoin chain state",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "tipset",
+			Usage: "specify tipset to call method on (pass comma separated array of cids)",
+		},
+	},
+	Subcommands: []*cli.Command{
+		lcli.StatePowerCmd,
+		lcli.StateSectorsCmd,
+		lcli.StateActiveSectorsCmd,
+		lcli.StateListActorsCmd,
+		lcli.StateListMinersCmd,
+		lcli.StateCircSupplyCmd,
+		lcli.StateSectorCmd,
+		lcli.StateGetActorCmd,
+		lcli.StateLookupIDCmd,
+		lcli.StateReplayCmd,
+		lcli.StateSectorSizeCmd,
+		lcli.StateReadStateCmd,
+		lcli.StateListMessagesCmd,
+		lcli.StateComputeStateCmd,
+		lcli.StateCallCmd,
+		lcli.StateGetDealSetCmd,
+		lcli.StateWaitMsgCmd,
+		lcli.StateSearchMsgCmd,
+		StateMinerInfo,
+		lcli.StateMarketCmd,
+		lcli.StateExecTraceCmd,
+		lcli.StateNtwkVersionCmd,
+		lcli.StateMinerProvingDeadlineCmd,
+		lcli.StateSysActorCIDsCmd,
+	},
+}
+
+var StateMinerInfo = &cli.Command{
+	Name:      "miner-info",
+	Usage:     "Retrieve miner information",
+	ArgsUsage: "[minerAddress]",
+	Action: func(cctx *cli.Context) error {
+		addressGetter := func(_ *cli.Context) (address.Address, error) {
+			if cctx.NArg() != 1 {
+				return address.Address{}, lcli.IncorrectNumArgs(cctx)
+			}
+
+			return address.NewFromString(cctx.Args().First())
+		}
+		err := spcli.InfoCmd(addressGetter).Action(cctx)
+		if err != nil {
+			return err
+		}
+		return nil
+	},
+}
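The addressGetter indirection above is the point of the spcli split: spcli command bodies are parameterized by how the miner address is obtained, so one implementation can serve binaries that take it implicitly or as an explicit argument. A sketch under that assumption (the fixed address below is hypothetical):

	getter := func(_ *cli.Context) (address.Address, error) {
		return address.NewFromString("f01234") // hypothetical hard-coded miner
	}
	infoCmd := spcli.InfoCmd(getter) // same command body, different address source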
@@ -74,7 +74,7 @@ func GetCidEncoder(cctx *cli.Context) (cidenc.Encoder, error) {
 	return e, nil
 }
 
-var clientCmd = &cli.Command{
+var ClientCmd = &cli.Command{
 	Name:  "client",
 	Usage: "Make deals, store data, retrieve data",
 	Subcommands: []*cli.Command{
cli/cmd.go
@@ -66,29 +66,6 @@ var CommonCommands = []*cli.Command{
 	VersionCmd,
 }
 
-var Commands = []*cli.Command{
-	WithCategory("basic", sendCmd),
-	WithCategory("basic", walletCmd),
-	WithCategory("basic", infoCmd),
-	WithCategory("basic", clientCmd),
-	WithCategory("basic", multisigCmd),
-	WithCategory("basic", filplusCmd),
-	WithCategory("basic", paychCmd),
-	WithCategory("developer", AuthCmd),
-	WithCategory("developer", MpoolCmd),
-	WithCategory("developer", StateCmd),
-	WithCategory("developer", ChainCmd),
-	WithCategory("developer", LogCmd),
-	WithCategory("developer", WaitApiCmd),
-	WithCategory("developer", FetchParamCmd),
-	WithCategory("developer", EvmCmd),
-	WithCategory("network", NetCmd),
-	WithCategory("network", SyncCmd),
-	WithCategory("status", StatusCmd),
-	PprofCmd,
-	VersionCmd,
-}
-
 func WithCategory(cat string, cmd *cli.Command) *cli.Command {
 	cmd.Category = strings.ToUpper(cat)
 	return cmd
@@ -361,6 +361,15 @@ var disputerStartCmd = &cli.Command{
 // for a given miner, index, and maxPostIndex, tries to dispute posts from 0...postsSnapshotted-1
 // returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent
 func makeDisputeWindowedPosts(ctx context.Context, api v0api.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) {
+	// CHECK: if the miner wallet balance is zero then skip sending the dispute message
+	walletBalance, err := api.WalletBalance(ctx, dl.miner)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get wallet balance while checking to send dispute messages to miner %s: %w", dl.miner, err)
+	}
+	if walletBalance.IsZero() {
+		disputeLog.Warnw("wallet balance is zero, skipping dispute message", "wallet", dl.miner)
+		return nil, nil
+	}
 	disputes := make([]*types.Message, 0)
 
 	for i := uint64(0); i < postsSnapshotted; i++ {
cli/filplus.go
@@ -4,23 +4,30 @@ import (
 	"bytes"
 	"context"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"os"
 	"strconv"
 	"strings"
 
 	cbor "github.com/ipfs/go-ipld-cbor"
+	"github.com/manifoldco/promptui"
 	"github.com/urfave/cli/v2"
+	"golang.org/x/sync/errgroup"
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
 	actorstypes "github.com/filecoin-project/go-state-types/actors"
 	"github.com/filecoin-project/go-state-types/big"
+	"github.com/filecoin-project/go-state-types/builtin"
+	verifregtypes13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
 	verifregtypes8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
+	datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
 	verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	"github.com/filecoin-project/go-state-types/network"
 
+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/v0api"
 	"github.com/filecoin-project/lotus/blockstore"
 	"github.com/filecoin-project/lotus/build"
@@ -32,7 +39,7 @@ import (
 	"github.com/filecoin-project/lotus/lib/tablewriter"
 )
 
-var filplusCmd = &cli.Command{
+var FilplusCmd = &cli.Command{
 	Name:  "filplus",
 	Usage: "Interact with the verified registry actor used by Filplus",
 	Flags: []cli.Flag{},
@@ -47,6 +54,7 @@ var filplusCmd = &cli.Command{
 		filplusListClaimsCmd,
 		filplusRemoveExpiredAllocationsCmd,
 		filplusRemoveExpiredClaimsCmd,
+		filplusExtendClaimCmd,
 	},
 }
 
@@ -322,7 +330,7 @@ var filplusListAllocationsCmd = &cli.Command{
 			tablewriter.Col(pieceSize),
 			tablewriter.Col(tMin),
 			tablewriter.Col(tMax),
-			tablewriter.NewLineCol(expr))
+			tablewriter.Col(expr))
 		// populate it with content
 		for _, alloc := range allocs {
 			tw.Write(alloc)
@@ -386,6 +394,11 @@ var filplusListClaimsCmd = &cli.Command{
 			Name:  "expired",
 			Usage: "list only expired claims",
 		},
+		&cli.BoolFlag{
+			Name:  "json",
+			Usage: "output results in json format",
+			Value: false,
+		},
 	},
 	Action: func(cctx *cli.Context) error {
 		if cctx.NArg() > 1 {
@@ -427,7 +440,7 @@ var filplusListClaimsCmd = &cli.Command{
 		var claimList []map[string]interface{}
 
 		for key, val := range claims {
-			if tsHeight > val.TermMax || !expired {
+			if tsHeight > val.TermStart+val.TermMax || !expired {
 				claim := map[string]interface{}{
 					claimID:  key,
 					provider: val.Provider,
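Worked example for the corrected predicate: TermStart is the epoch at which a claim became active and TermMax is its maximum duration from that point, so a claim with TermStart 100 and TermMax 1000 lives until epoch 1100. Comparing tsHeight against TermMax alone would have reported that claim expired from epoch 1001 onward; the fix compares against TermStart+TermMax, matching the expiry arithmetic used elsewhere in this file (claim.TermStart+claim.TermMax > head.Height()).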
@@ -466,7 +479,7 @@ var filplusListClaimsCmd = &cli.Command{
 			tablewriter.Col(tMin),
 			tablewriter.Col(tMax),
 			tablewriter.Col(tStart),
-			tablewriter.NewLineCol(sector))
+			tablewriter.Col(sector))
 		// populate it with content
 		for _, alloc := range claimList {
 
@@ -518,7 +531,6 @@ var filplusListClaimsCmd = &cli.Command{
 		}
 
 		return writeOut(ts.Height(), claimsMap, cctx.Bool("json"), cctx.Bool("expired"))
-
 	},
 }
 
|
|||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var filplusExtendClaimCmd = &cli.Command{
|
||||||
|
Name: "extend-claim",
|
||||||
|
Usage: "extends claim expiration (TermMax)",
|
||||||
|
UsageText: `Extends claim expiration (TermMax).
|
||||||
|
If the client is original client then claim can be extended to maximum 5 years and no Datacap is required.
|
||||||
|
If the client id different then claim can be extended up to maximum 5 years from now and Datacap is required.
|
||||||
|
`,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
&cli.Int64Flag{
|
||||||
|
Name: "term-max",
|
||||||
|
Usage: "The maximum period for which a provider can earn quality-adjusted power for the piece (epochs). Default is 5 years.",
|
||||||
|
Aliases: []string{"tmax"},
|
||||||
|
Value: verifregtypes13.MaximumVerifiedAllocationTerm,
|
||||||
|
},
|
||||||
|
&cli.StringFlag{
|
||||||
|
Name: "client",
|
||||||
|
Usage: "the client address that will used to send the message",
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "all",
|
||||||
|
Usage: "automatically extend TermMax of all claims for specified miner[s] to --term-max (default: 5 years from claim start epoch)",
|
||||||
|
},
|
||||||
|
&cli.StringSliceFlag{
|
||||||
|
Name: "miner",
|
||||||
|
Usage: "storage provider address[es]",
|
||||||
|
Aliases: []string{"m", "provider", "p"},
|
||||||
|
},
|
||||||
|
&cli.BoolFlag{
|
||||||
|
Name: "assume-yes",
|
||||||
|
Usage: "automatic yes to prompts; assume 'yes' as answer to all prompts and run non-interactively",
|
||||||
|
Aliases: []string{"y", "yes"},
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "confidence",
|
||||||
|
Usage: "number of block confirmations to wait for",
|
||||||
|
Value: int(build.MessageConfidence),
|
||||||
|
},
|
||||||
|
&cli.IntFlag{
|
||||||
|
Name: "batch-size",
|
||||||
|
Usage: "number of extend requests per batch. If set incorrectly, this will lead to out of gas error",
|
||||||
|
Value: 500,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ArgsUsage: "<claim1> <claim2> ... or <miner1=claim1> <miner2=claims2> ...",
|
||||||
|
Action: func(cctx *cli.Context) error {
|
||||||
|
|
||||||
|
miners := cctx.StringSlice("miner")
|
||||||
|
all := cctx.Bool("all")
|
||||||
|
client := cctx.String("client")
|
||||||
|
tmax := cctx.Int64("term-max")
|
||||||
|
|
||||||
|
// No miner IDs and no arguments
|
||||||
|
if len(miners) == 0 && cctx.Args().Len() == 0 {
|
||||||
|
return xerrors.Errorf("must specify at least one miner ID or argument[s]")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Single Miner with no claimID and no --all flag
|
||||||
|
if len(miners) == 1 && cctx.Args().Len() == 0 && !all {
|
||||||
|
return xerrors.Errorf("must specify either --all flag or claim IDs to extend in argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multiple Miner with claimIDs
|
||||||
|
if len(miners) > 1 && cctx.Args().Len() > 0 {
|
||||||
|
return xerrors.Errorf("either specify multiple miner IDs or multiple arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Multiple Miner with no claimID and no --all flag
|
||||||
|
if len(miners) > 1 && cctx.Args().Len() == 0 && !all {
|
||||||
|
return xerrors.Errorf("must specify --all flag with multiple miner IDs")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tmax can't be more than policy max
|
||||||
|
if tmax > verifregtypes13.MaximumVerifiedAllocationTerm {
|
||||||
|
return xerrors.Errorf("specified term-max %d is larger than %d maximum allowed by verified regirty actor policy", tmax, verifregtypes13.MaximumVerifiedAllocationTerm)
|
||||||
|
}
|
||||||
|
|
||||||
|
api, closer, err := GetFullNodeAPIV1(cctx)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to get full node api: %s", err)
|
||||||
|
}
|
||||||
|
defer closer()
|
||||||
|
ctx := ReqContext(cctx)
|
||||||
|
|
||||||
|
clientAddr, err := address.NewFromString(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
claimMap := make(map[verifregtypes13.ClaimId]ProvInfo)
|
||||||
|
|
||||||
|
// If no miners and arguments are present
|
||||||
|
if len(miners) == 0 && cctx.Args().Len() > 0 {
|
||||||
|
for _, arg := range cctx.Args().Slice() {
|
||||||
|
detail := strings.Split(arg, "=")
|
||||||
|
if len(detail) > 2 {
|
||||||
|
return xerrors.Errorf("incorrect argument format: %s", detail)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := strconv.ParseInt(detail[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to parse the claim ID for %s for argument %s: %s", detail[0], detail, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
maddr, err := address.NewFromString(detail[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that minerID exists
|
||||||
|
_, err = api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mid, err := address.IDFromAddress(maddr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
pi := ProvInfo{
|
||||||
|
Addr: maddr,
|
||||||
|
ID: abi.ActorID(mid),
|
||||||
|
}
|
||||||
|
|
||||||
|
claimMap[verifregtypes13.ClaimId(n)] = pi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If 1 miner ID and multiple arguments
|
||||||
|
if len(miners) == 1 && cctx.Args().Len() > 0 && !all {
|
||||||
|
for _, arg := range cctx.Args().Slice() {
|
||||||
|
detail := strings.Split(arg, "=")
|
||||||
|
if len(detail) > 1 {
|
||||||
|
return xerrors.Errorf("incorrect argument format %s. Must provide only claim IDs with single miner ID", detail)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := strconv.ParseInt(detail[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to parse the claim ID for %s for argument %s: %s", detail[0], detail, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
claimMap[verifregtypes13.ClaimId(n)] = ProvInfo{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs, err := CreateExtendClaimMsg(ctx, api, claimMap, miners, clientAddr, abi.ChainEpoch(tmax), all, cctx.Bool("assume-yes"), cctx.Int("batch-size"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not msgs are found then no claims can be extended
|
||||||
|
if msgs == nil {
|
||||||
|
fmt.Println("No eligible claims to extend")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MpoolBatchPushMessage method will take care of gas estimation and funds check
|
||||||
|
smsgs, err := api.MpoolBatchPushMessage(ctx, msgs, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait for msgs to get mined into a block
|
||||||
|
eg := errgroup.Group{}
|
||||||
|
eg.SetLimit(10)
|
||||||
|
for _, msg := range smsgs {
|
||||||
|
msg := msg
|
||||||
|
eg.Go(func() error {
|
||||||
|
wait, err := api.StateWaitMsg(ctx, msg.Cid(), uint64(cctx.Int("confidence")), 2000, true)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("timeout waiting for message to land on chain %s", wait.Message)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if wait.Receipt.ExitCode.IsError() {
|
||||||
|
return xerrors.Errorf("failed to execute message %s: %s", wait.Message, wait.Receipt.ExitCode)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return eg.Wait()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type ProvInfo struct {
|
||||||
|
Addr address.Address
|
||||||
|
ID abi.ActorID
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateExtendClaimMsg creates extend message[s] based on the following conditions
|
||||||
|
// 1. Extend all claims for a miner ID
|
||||||
|
// 2. Extend all claims for multiple miner IDs
|
||||||
|
// 3. Extend specified claims for a miner ID
|
||||||
|
// 4. Extend specific claims for specific miner ID
|
||||||
|
// 5. Extend all claims for a miner ID with different client address (2 messages)
|
||||||
|
// 6. Extend all claims for multiple miner IDs with different client address (2 messages)
|
||||||
|
// 7. Extend specified claims for a miner ID with different client address (2 messages)
|
||||||
|
// 8. Extend specific claims for specific miner ID with different client address (2 messages)
|
||||||
|
func CreateExtendClaimMsg(ctx context.Context, api api.FullNode, pcm map[verifregtypes13.ClaimId]ProvInfo, miners []string, wallet address.Address, tmax abi.ChainEpoch, all, assumeYes bool, batchSize int) ([]*types.Message, error) {
|
||||||
|
|
||||||
|
ac, err := api.StateLookupID(ctx, wallet, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w, err := address.IDFromAddress(ac)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("converting wallet address to ID: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
wid := abi.ActorID(w)
|
||||||
|
|
||||||
|
head, err := api.ChainHead(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var terms []verifregtypes13.ClaimTerm
|
||||||
|
newClaims := make(map[verifregtypes13.ClaimExtensionRequest]big.Int)
|
||||||
|
rDataCap := big.NewInt(0)
|
||||||
|
|
||||||
|
// If --all is set
|
||||||
|
if all {
|
||||||
|
for _, id := range miners {
|
||||||
|
maddr, err := address.NewFromString(id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("parsing miner %s: %s", id, err)
|
||||||
|
}
|
||||||
|
mid, err := address.IDFromAddress(maddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("converting miner address to miner ID: %s", err)
|
||||||
|
}
|
||||||
|
claims, err := api.StateGetClaims(ctx, maddr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("getting claims for miner %s: %s", maddr, err)
|
||||||
|
}
|
||||||
|
for claimID, claim := range claims {
|
||||||
|
claimID := claimID
|
||||||
|
claim := claim
|
||||||
|
// If the client is not the original client - burn datacap
|
||||||
|
if claim.Client != wid {
|
||||||
|
// The new duration should be greater than the original deal duration and claim should not already be expired
|
||||||
|
if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
req := verifregtypes13.ClaimExtensionRequest{
|
||||||
|
Claim: verifregtypes13.ClaimId(claimID),
|
||||||
|
Provider: abi.ActorID(mid),
|
||||||
|
TermMax: head.Height() + tmax - claim.TermStart,
|
||||||
|
}
|
||||||
|
newClaims[req] = big.NewInt(int64(claim.Size))
|
||||||
|
rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int)
|
||||||
|
}
|
||||||
|
// If new duration shorter than the original duration then do nothing
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// For original client, compare duration(TermMax) and claim should not already be expired
|
||||||
|
if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
terms = append(terms, verifregtypes13.ClaimTerm{
|
||||||
|
ClaimId: verifregtypes13.ClaimId(claimID),
|
||||||
|
TermMax: tmax,
|
||||||
|
Provider: abi.ActorID(mid),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Single miner and specific claims
|
||||||
|
if len(miners) == 1 && len(pcm) > 0 {
|
||||||
|
maddr, err := address.NewFromString(miners[0])
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("parsing miner %s: %s", miners[0], err)
|
||||||
|
}
|
||||||
|
mid, err := address.IDFromAddress(maddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("converting miner address to miner ID: %s", err)
|
||||||
|
}
|
||||||
|
claims, err := api.StateGetClaims(ctx, maddr, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("getting claims for miner %s: %s", maddr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for claimID := range pcm {
|
||||||
|
claimID := claimID
|
||||||
|
claim, ok := claims[verifregtypes9.ClaimId(claimID)]
|
||||||
|
if !ok {
|
||||||
|
return nil, xerrors.Errorf("claim %d not found for provider %s", claimID, miners[0])
|
||||||
|
}
|
||||||
|
// If the client is not the original client - burn datacap
|
||||||
|
if claim.Client != wid {
|
||||||
|
// The new duration should be greater than the original deal duration and claim should not already be expired
|
||||||
|
if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
req := verifregtypes13.ClaimExtensionRequest{
|
||||||
|
Claim: claimID,
|
||||||
|
Provider: abi.ActorID(mid),
|
||||||
|
TermMax: head.Height() + tmax - claim.TermStart,
|
||||||
|
}
|
||||||
|
newClaims[req] = big.NewInt(int64(claim.Size))
|
||||||
|
rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int)
|
||||||
|
}
|
||||||
|
// If new duration shorter than the original duration then do nothing
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// For original client, compare duration(TermMax) and claim should not already be expired
|
||||||
|
if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
terms = append(terms, verifregtypes13.ClaimTerm{
|
||||||
|
ClaimId: claimID,
|
||||||
|
TermMax: tmax,
|
||||||
|
Provider: abi.ActorID(mid),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(miners) == 0 && len(pcm) > 0 {
|
||||||
|
for claimID, prov := range pcm {
|
||||||
|
prov := prov
|
||||||
|
claimID := claimID
|
||||||
|
claim, err := api.StateGetClaim(ctx, prov.Addr, verifregtypes9.ClaimId(claimID), types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("could not load the claim %d: %s", claimID, err)
|
||||||
|
}
|
||||||
|
if claim == nil {
|
||||||
|
return nil, xerrors.Errorf("claim %d not found in the actor state", claimID)
|
||||||
|
}
|
||||||
|
// If the client is not the original client - burn datacap
|
||||||
|
if claim.Client != wid {
|
||||||
|
// The new duration should be greater than the original deal duration and claim should not already be expired
|
||||||
|
if head.Height()+tmax-claim.TermStart > claim.TermMax-claim.TermStart && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
req := verifregtypes13.ClaimExtensionRequest{
|
||||||
|
Claim: claimID,
|
||||||
|
Provider: prov.ID,
|
||||||
|
TermMax: head.Height() + tmax - claim.TermStart,
|
||||||
|
}
|
||||||
|
newClaims[req] = big.NewInt(int64(claim.Size))
|
||||||
|
rDataCap.Add(big.NewInt(int64(claim.Size)).Int, rDataCap.Int)
|
||||||
|
}
|
||||||
|
// If new duration shorter than the original duration then do nothing
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// For original client, compare duration(TermMax) and claim should not already be expired
|
||||||
|
if claim.TermMax < tmax && claim.TermStart+claim.TermMax > head.Height() {
|
||||||
|
terms = append(terms, verifregtypes13.ClaimTerm{
|
||||||
|
ClaimId: claimID,
|
||||||
|
TermMax: tmax,
|
||||||
|
Provider: prov.ID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var msgs []*types.Message
|
||||||
|
|
||||||
|
if len(terms) > 0 {
|
||||||
|
// Batch in 500 to avoid running out of gas
|
||||||
|
for i := 0; i < len(terms); i += batchSize {
|
||||||
|
batchEnd := i + batchSize
|
||||||
|
if batchEnd > len(terms) {
|
||||||
|
batchEnd = len(terms)
|
||||||
|
}
|
||||||
|
|
||||||
|
batch := terms[i:batchEnd]
|
||||||
|
|
||||||
|
params, err := actors.SerializeParams(&verifregtypes13.ExtendClaimTermsParams{
|
||||||
|
Terms: batch,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("failed to searialise the parameters: %s", err)
|
||||||
|
}
|
||||||
|
oclaimMsg := &types.Message{
|
||||||
|
To: verifreg.Address,
|
||||||
|
From: wallet,
|
||||||
|
Method: verifreg.Methods.ExtendClaimTerms,
|
||||||
|
Params: params,
|
||||||
|
}
|
||||||
|
msgs = append(msgs, oclaimMsg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(newClaims) > 0 {
|
||||||
|
if !assumeYes {
|
||||||
|
out := fmt.Sprintf("Some of the specified allocation have a different client address and will require %d Datacap to extend. Proceed? Yes [Y/y] / No [N/n], Ctrl+C (^C) to exit", rDataCap.Int)
|
||||||
|
validate := func(input string) error {
|
||||||
|
if strings.EqualFold(input, "y") || strings.EqualFold(input, "yes") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if strings.EqualFold(input, "n") || strings.EqualFold(input, "no") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.New("incorrect input")
|
||||||
|
}
|
||||||
|
|
||||||
|
templates := &promptui.PromptTemplates{
|
||||||
|
Prompt: "{{ . }} ",
|
||||||
|
Valid: "{{ . | green }} ",
|
||||||
|
Invalid: "{{ . | red }} ",
|
||||||
|
Success: "{{ . | cyan | bold }} ",
|
||||||
|
}
|
||||||
|
|
||||||
|
prompt := promptui.Prompt{
|
||||||
|
Label: out,
|
||||||
|
Templates: templates,
|
||||||
|
Validate: validate,
|
||||||
|
}
|
||||||
|
|
||||||
|
input, err := prompt.Run()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if strings.Contains(strings.ToLower(input), "n") {
|
||||||
|
fmt.Println("Dropping the extension for claims that require Datacap")
|
||||||
|
return msgs, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get datacap balance
|
||||||
|
aDataCap, err := api.StateVerifiedClientStatus(ctx, wallet, types.EmptyTSK)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if aDataCap == nil {
|
||||||
|
return nil, xerrors.Errorf("wallet %s does not have any datacap", wallet)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that we have enough data cap to make the allocation
|
||||||
|
if rDataCap.GreaterThan(big.NewInt(aDataCap.Int64())) {
|
||||||
|
return nil, xerrors.Errorf("requested datacap %s is greater then the available datacap %s", rDataCap, aDataCap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a map of just keys, so we can easily batch based on the numeric keys
|
||||||
|
keys := make([]verifregtypes13.ClaimExtensionRequest, 0, len(newClaims))
|
||||||
|
for k := range newClaims {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch in 500 to avoid running out of gas
|
||||||
|
for i := 0; i < len(keys); i += batchSize {
|
||||||
|
batchEnd := i + batchSize
|
||||||
|
if batchEnd > len(newClaims) {
|
||||||
|
batchEnd = len(newClaims)
|
||||||
|
}
|
||||||
|
|
||||||
|
batch := keys[i:batchEnd]
|
||||||
|
|
||||||
|
// Calculate Datacap for this batch
|
||||||
|
dcap := big.NewInt(0)
|
||||||
|
for _, k := range batch {
|
||||||
|
dc := newClaims[k]
|
||||||
|
dcap.Add(dcap.Int, dc.Int)
|
||||||
|
}
|
||||||
|
|
||||||
|
ncparams, err := actors.SerializeParams(&verifregtypes13.AllocationRequests{
|
||||||
|
Extensions: batch,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("failed to searialise the parameters: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
transferParams, err := actors.SerializeParams(&datacap2.TransferParams{
|
||||||
|
To: builtin.VerifiedRegistryActorAddr,
|
||||||
|
Amount: big.Mul(dcap, builtin.TokenPrecision),
|
||||||
|
OperatorData: ncparams,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, xerrors.Errorf("failed to serialize transfer parameters: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nclaimMsg := &types.Message{
|
||||||
|
To: builtin.DatacapActorAddr,
|
||||||
|
From: wallet,
|
||||||
|
Method: datacap.Methods.TransferExported,
|
||||||
|
Params: transferParams,
|
||||||
|
Value: big.Zero(),
|
||||||
|
}
|
||||||
|
msgs = append(msgs, nclaimMsg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return msgs, nil
|
||||||
|
}
|
||||||
|
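Usage notes for the new command (invocations are illustrative; the flag and argument forms come from the definition above). Extending every claim for one or more miners uses --all:

	lotus filplus extend-claim --client <clientAddr> --miner f01000 --miner f02000 --all

Specific claims are given either as bare claim IDs with a single --miner, or as miner=claim pairs with no --miner flag:

	lotus filplus extend-claim --client <clientAddr> --miner f01000 123 456
	lotus filplus extend-claim --client <clientAddr> f01000=123 f02000=456

On the batching arithmetic: with the default --batch-size of 500, 1200 eligible terms yield three ExtendClaimTerms messages covering terms [0,500), [500,1000) and [1000,1200).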
@@ -23,7 +23,7 @@ import (
 	"github.com/filecoin-project/lotus/journal/alerting"
 )
 
-var infoCmd = &cli.Command{
+var InfoCmd = &cli.Command{
 	Name:   "info",
 	Usage:  "Print node info",
 	Action: infoCmdAct,
@@ -491,7 +491,7 @@ var MpoolReplaceCmd = &cli.Command{
 			msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium)
 
 			mff := func() (abi.TokenAmount, error) {
-				return abi.TokenAmount(config.DefaultDefaultMaxFee), nil
+				return abi.TokenAmount(config.DefaultDefaultMaxFee()), nil
 			}
 
 			messagepool.CapGasFee(mff, &msg, mss)
@@ -32,7 +32,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )
 
-var multisigCmd = &cli.Command{
+var MultisigCmd = &cli.Command{
 	Name:  "msig",
 	Usage: "Interact with a multisig wallet",
 	Flags: []cli.Flag{
Some files were not shown because too many files have changed in this diff.