Merge branch 'master' into adlrocha/consistent-bcast
Commit b7c297c537
@ -107,13 +107,11 @@ func main() {
	// form the input data.
	type data struct {
		Networks   []string
		SnapNames  []string
		ItestFiles []string
		UnitSuites map[string]string
	}
	in := data{
		Networks:   []string{"mainnet", "butterflynet", "calibnet", "debug"},
		SnapNames:  []string{"lotus", "lotus-filecoin"},
		ItestFiles: itests,
		UnitSuites: func() map[string]string {
			ret := make(map[string]string)
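The CircleCI configuration in this diff is generated from a Go template that uses `[[` / `]]` as action delimiters, which is why CircleCI's own `<< >>` parameters and the quoted `[["[[ ... ]]"]]` shell tests appear literally in the file (they pass through the template engine untouched). Below is a minimal, self-contained sketch of that rendering step; the template text and field values are illustrative assumptions, not the repository's actual `template.yml` or generator.

```go
package main

import (
	"os"
	"text/template"
)

// data mirrors the shape of the input struct shown in the diff above.
type data struct {
	Networks []string
}

func main() {
	// Illustrative template snippet using "[[ ]]" delimiters instead of "{{ }}".
	const tmpl = `workflows:
  ci:
    jobs:[[- range .Networks]]
      - build-docker:
          network: [[.]][[- end]]
`
	// Delims switches the action markers so "<< parameters.foo >>" in the
	// emitted YAML is never interpreted by the template engine.
	t := template.Must(template.New("ci").Delims("[[", "]]").Parse(tmpl))
	if err := t.Execute(os.Stdout, data{Networks: []string{"mainnet", "calibnet"}}); err != nil {
		panic(err)
	}
}
```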
@ -7,24 +7,19 @@ executors:
|
||||
golang:
|
||||
docker:
|
||||
# Must match GO_VERSION_MIN in project root
|
||||
- image: cimg/go:1.18.8
|
||||
- image: cimg/go:1.19.7
|
||||
resource_class: medium+
|
||||
golang-2xl:
|
||||
docker:
|
||||
# Must match GO_VERSION_MIN in project root
|
||||
- image: cimg/go:1.19.7
|
||||
resource_class: 2xlarge
|
||||
ubuntu:
|
||||
docker:
|
||||
- image: ubuntu:20.04
|
||||
packer:
|
||||
description: |
|
||||
The HashiCorp provided Packer container
|
||||
parameters:
|
||||
packer-version:
|
||||
type: string
|
||||
default: "1.8"
|
||||
docker:
|
||||
- image: hashicorp/packer:<< parameters.packer-version >>
|
||||
|
||||
|
||||
commands:
|
||||
prepare:
|
||||
build-platform-specific:
|
||||
parameters:
|
||||
linux:
|
||||
default: true
|
||||
@ -34,32 +29,27 @@ commands:
|
||||
default: false
|
||||
description: is a darwin build environment?
|
||||
type: boolean
|
||||
darwin-architecture:
|
||||
default: "amd64"
|
||||
description: which darwin architecture is being used?
|
||||
type: string
|
||||
steps:
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
- when:
|
||||
condition: <<parameters.linux>>
|
||||
steps:
|
||||
- run:
|
||||
name: Check Go Version
|
||||
command: |
|
||||
v=`go version | { read _ _ v _; echo ${v#go}; }`
|
||||
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
|
||||
echo "GO_VERSION_MIN file does not match the go version being used."
|
||||
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
|
||||
exit 1
|
||||
fi
|
||||
- run: sudo apt-get update
|
||||
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
|
||||
- run: sudo apt-get install python-is-python3
|
||||
|
||||
- install-ubuntu-deps
|
||||
- check-go-version
|
||||
- when:
|
||||
condition: <<parameters.darwin>>
|
||||
steps:
|
||||
- run:
|
||||
name: Install Go
|
||||
command: |
|
||||
curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-amd64.pkg -o /tmp/go.pkg && \
|
||||
curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-<<parameters.darwin-architecture>>.pkg -o /tmp/go.pkg && \
|
||||
sudo installer -pkg /tmp/go.pkg -target /
|
||||
- run:
|
||||
name: Export Go
|
||||
@ -73,8 +63,7 @@ commands:
|
||||
name: Install Rust
|
||||
command: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
- run: make deps lotus
|
||||
download-params:
|
||||
steps:
|
||||
- restore_cache:
|
||||
@ -83,7 +72,7 @@ commands:
|
||||
- 'v26-2k-lotus-params'
|
||||
paths:
|
||||
- /var/tmp/filecoin-proof-parameters/
|
||||
- run: ./lotus fetch-params 2048
|
||||
- run: ./lotus fetch-params 2048
|
||||
- save_cache:
|
||||
name: Save parameters cache
|
||||
key: 'v26-2k-lotus-params'
|
||||
@ -105,31 +94,43 @@ commands:
|
||||
name: fetch all tags
|
||||
command: |
|
||||
git fetch --all
|
||||
packer_build:
|
||||
description: "Run a packer build"
|
||||
parameters:
|
||||
template:
|
||||
description: |
|
||||
The name of the packer template file
|
||||
type: string
|
||||
default: packer.json
|
||||
args:
|
||||
description: |
|
||||
Arguments to pass to the packer build command
|
||||
type: string
|
||||
default: ""
|
||||
|
||||
install-ubuntu-deps:
|
||||
steps:
|
||||
- run:
|
||||
name: "Run a packer build"
|
||||
command: packer build << parameters.args >> << parameters.template >>
|
||||
no_output_timeout: 1h
|
||||
- run: sudo apt-get update
|
||||
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
|
||||
check-go-version:
|
||||
steps:
|
||||
- run: |
|
||||
v=`go version | { read _ _ v _; echo ${v#go}; }`
|
||||
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
|
||||
echo "GO_VERSION_MIN file does not match the go version being used."
|
||||
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
jobs:
|
||||
build:
|
||||
executor: golang
|
||||
working_directory: ~/lotus
|
||||
steps:
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
- install-ubuntu-deps
|
||||
- check-go-version
|
||||
- run: make deps lotus
|
||||
- persist_to_workspace:
|
||||
root: ~/
|
||||
paths:
|
||||
- "lotus"
|
||||
mod-tidy-check:
|
||||
executor: golang
|
||||
working_directory: ~/lotus
|
||||
steps:
|
||||
- prepare
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- run: go mod tidy -v
|
||||
- run:
|
||||
name: Check git diff
|
||||
@ -140,13 +141,14 @@ jobs:
|
||||
test:
|
||||
description: |
|
||||
Run tests with gotestsum.
|
||||
working_directory: ~/lotus
|
||||
parameters: &test-params
|
||||
executor:
|
||||
type: executor
|
||||
default: golang
|
||||
go-test-flags:
|
||||
type: string
|
||||
default: "-timeout 30m"
|
||||
default: "-timeout 20m"
|
||||
description: Flags passed to go test.
|
||||
target:
|
||||
type: string
|
||||
@ -155,21 +157,22 @@ jobs:
|
||||
proofs-log-test:
|
||||
type: string
|
||||
default: "0"
|
||||
get-params:
|
||||
type: boolean
|
||||
default: false
|
||||
suite:
|
||||
type: string
|
||||
default: unit
|
||||
description: Test suite name to report to CircleCI.
|
||||
gotestsum-format:
|
||||
type: string
|
||||
default: standard-verbose
|
||||
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps lotus
|
||||
no_output_timeout: 30m
|
||||
- download-params
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- when:
|
||||
condition: << parameters.get-params >>
|
||||
steps:
|
||||
- download-params
|
||||
- run:
|
||||
name: go test
|
||||
environment:
|
||||
@ -180,12 +183,11 @@ jobs:
|
||||
mkdir -p /tmp/test-reports/<< parameters.suite >>
|
||||
mkdir -p /tmp/test-artifacts
|
||||
gotestsum \
|
||||
--format << parameters.gotestsum-format >> \
|
||||
--format standard-verbose \
|
||||
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
|
||||
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
|
||||
-- \
|
||||
<< parameters.go-test-flags >> \
|
||||
<< parameters.target >>
|
||||
--packages="<< parameters.target >>" \
|
||||
-- << parameters.go-test-flags >>
|
||||
no_output_timeout: 30m
|
||||
- store_test_results:
|
||||
path: /tmp/test-reports
|
||||
@ -193,6 +195,7 @@ jobs:
|
||||
path: /tmp/test-artifacts/<< parameters.suite >>.json
|
||||
|
||||
test-conformance:
|
||||
working_directory: ~/lotus
|
||||
description: |
|
||||
Run tests using a corpus of interoperable test vectors for Filecoin
|
||||
implementations to test their correctness and compliance with the Filecoin
|
||||
@ -208,10 +211,9 @@ jobs:
|
||||
submodule is used.
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps lotus
|
||||
no_output_timeout: 30m
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- download-params
|
||||
- when:
|
||||
condition:
|
||||
@ -254,7 +256,7 @@ jobs:
|
||||
build-linux-amd64:
|
||||
executor: golang
|
||||
steps:
|
||||
- prepare
|
||||
- build-platform-specific
|
||||
- run: make lotus lotus-miner lotus-worker
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
@ -273,10 +275,12 @@ jobs:
|
||||
macos:
|
||||
xcode: "13.4.1"
|
||||
steps:
|
||||
- prepare:
|
||||
- build-platform-specific:
|
||||
linux: false
|
||||
darwin: true
|
||||
darwin-architecture: amd64
|
||||
- run: make lotus lotus-miner lotus-worker
|
||||
- run: otool -hv lotus
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
@ -295,13 +299,15 @@ jobs:
|
||||
resource_class: filecoin-project/self-hosted-m1
|
||||
steps:
|
||||
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
|
||||
- prepare:
|
||||
- build-platform-specific:
|
||||
linux: false
|
||||
darwin: true
|
||||
darwin-architecture: arm64
|
||||
- run: |
|
||||
export CPATH=$(brew --prefix)/include
|
||||
export LIBRARY_PATH=$(brew --prefix)/lib
|
||||
make lotus lotus-miner lotus-worker
|
||||
- run: otool -hv lotus
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
@ -349,109 +355,52 @@ jobs:
|
||||
- run: ./scripts/generate-checksums.sh
|
||||
- run: ./scripts/publish-checksums.sh
|
||||
|
||||
build-appimage:
|
||||
machine:
|
||||
image: ubuntu-2004:202111-02
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Update Go
|
||||
command: |
|
||||
sudo rm -rf /usr/local/go && \
|
||||
curl -L https://golang.org/dl/go`cat GO_VERSION_MIN`.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
sudo tar -C /usr/local -xvf /tmp/go.tar.gz
|
||||
- run: go version
|
||||
- run:
|
||||
name: install appimage-builder
|
||||
command: |
|
||||
# appimage-builder requires /dev/snd to exist. It creates containers during the testing phase
|
||||
# that pass sound devices from the host to the testing container. (hard coded!)
|
||||
# https://github.com/AppImageCrafters/appimage-builder/blob/master/appimagebuilder/modules/test/execution_test.py#L54
|
||||
# Circleci doesn't provide a working sound device; this is enough to fake it.
|
||||
if [ ! -e /dev/snd ]
|
||||
then
|
||||
sudo mkdir /dev/snd
|
||||
sudo mknod /dev/snd/ControlC0 c 1 2
|
||||
fi
|
||||
|
||||
# docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
|
||||
sudo apt update
|
||||
sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
|
||||
sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
|
||||
sudo chmod +x /usr/local/bin/appimagetool
|
||||
sudo pip3 install appimage-builder
|
||||
- run:
|
||||
name: install lotus dependencies
|
||||
command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
|
||||
- run:
|
||||
name: build appimage
|
||||
command: |
|
||||
sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
|
||||
make appimage
|
||||
- run: |
|
||||
mkdir -p /tmp/workspace/appimage && \
|
||||
mv Lotus-*.AppImage /tmp/workspace/appimage/
|
||||
- persist_to_workspace:
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- appimage
|
||||
|
||||
|
||||
gofmt:
|
||||
executor: golang
|
||||
working_directory: ~/lotus
|
||||
steps:
|
||||
- prepare
|
||||
- run:
|
||||
command: "! go fmt ./... 2>&1 | read"
|
||||
|
||||
gen-check:
|
||||
executor: golang
|
||||
working_directory: ~/lotus
|
||||
steps:
|
||||
- prepare
|
||||
- run: make deps
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- run: go install golang.org/x/tools/cmd/goimports
|
||||
- run: go install github.com/hannahhoward/cbor-gen-for
|
||||
- run: make gen
|
||||
- run: git --no-pager diff
|
||||
- run: git --no-pager diff --quiet
|
||||
- run: git --no-pager diff && git --no-pager diff --quiet
|
||||
- run: make docsgen-cli
|
||||
- run: git --no-pager diff
|
||||
- run: git --no-pager diff --quiet
|
||||
- run: git --no-pager diff && git --no-pager diff --quiet
|
||||
|
||||
docs-check:
|
||||
executor: golang
|
||||
working_directory: ~/lotus
|
||||
steps:
|
||||
- prepare
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- run: go install golang.org/x/tools/cmd/goimports
|
||||
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
|
||||
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
|
||||
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
|
||||
- run: make deps
|
||||
- run: make docsgen
|
||||
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
|
||||
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
|
||||
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
|
||||
- run: git --no-pager diff
|
||||
- run: diff ../pre-openrpc-full ../post-openrpc-full
|
||||
- run: diff ../pre-openrpc-miner ../post-openrpc-miner
|
||||
- run: diff ../pre-openrpc-worker ../post-openrpc-worker
|
||||
- run: git --no-pager diff --quiet
|
||||
- run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
|
||||
|
||||
lint: &lint
|
||||
lint-all:
|
||||
description: |
|
||||
Run golangci-lint.
|
||||
working_directory: ~/lotus
|
||||
parameters:
|
||||
executor:
|
||||
type: executor
|
||||
default: golang
|
||||
concurrency:
|
||||
type: string
|
||||
default: '2'
|
||||
description: |
|
||||
Concurrency used to run linters. Defaults to 2 because NumCPU is not
|
||||
aware of container CPU limits.
|
||||
args:
|
||||
type: string
|
||||
default: ''
|
||||
@ -459,84 +408,15 @@ jobs:
|
||||
Arguments to pass to golangci-lint
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps
|
||||
no_output_timeout: 30m
|
||||
- install-ubuntu-deps
|
||||
- attach_workspace:
|
||||
at: ~/
|
||||
- run:
|
||||
name: Lint
|
||||
command: |
|
||||
golangci-lint run -v --timeout 2m \
|
||||
--concurrency << parameters.concurrency >> << parameters.args >>
|
||||
lint-all:
|
||||
<<: *lint
|
||||
golangci-lint run -v --timeout 10m \
|
||||
--concurrency 4 << parameters.args >>
|
||||
|
||||
publish:
|
||||
description: publish binary artifacts
|
||||
executor: ubuntu
|
||||
parameters:
|
||||
linux:
|
||||
default: false
|
||||
description: publish linux binaries?
|
||||
type: boolean
|
||||
appimage:
|
||||
default: false
|
||||
description: publish appimage binaries?
|
||||
type: boolean
|
||||
steps:
|
||||
- run:
|
||||
name: Install git jq curl
|
||||
command: apt update && apt install -y git jq curl sudo
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- checkout
|
||||
- install_ipfs
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- when:
|
||||
condition: << parameters.linux >>
|
||||
steps:
|
||||
- run: ./scripts/build-arch-bundle.sh linux
|
||||
- run: ./scripts/publish-arch-release.sh linux
|
||||
- when:
|
||||
condition: << parameters.appimage >>
|
||||
steps:
|
||||
- run: ./scripts/build-appimage-bundle.sh
|
||||
- run: ./scripts/publish-arch-release.sh appimage
|
||||
|
||||
publish-snapcraft:
|
||||
description: build and push snapcraft
|
||||
machine:
|
||||
image: ubuntu-2004:202104-01
|
||||
resource_class: 2xlarge
|
||||
parameters:
|
||||
channel:
|
||||
type: string
|
||||
default: "edge"
|
||||
description: snapcraft channel
|
||||
snap-name:
|
||||
type: string
|
||||
default: 'lotus-filecoin'
|
||||
description: name of snap in snap store
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install snapcraft
|
||||
command: sudo snap install snapcraft --classic
|
||||
- run:
|
||||
name: Build << parameters.snap-name >> snap
|
||||
command: |
|
||||
if [ "<< parameters.snap-name >>" != 'lotus-filecoin' ]; then
|
||||
cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > edited-snapcraft.yaml
|
||||
mv edited-snapcraft.yaml snap/snapcraft.yaml
|
||||
fi
|
||||
|
||||
snapcraft --use-lxd --debug
|
||||
- run:
|
||||
name: Publish snap to << parameters.channel >> channel
|
||||
shell: /bin/bash -o pipefail
|
||||
command: |
|
||||
snapcraft upload *.snap --release << parameters.channel >>
|
||||
build-docker:
|
||||
description: >
|
||||
Publish to Dockerhub
|
||||
@ -569,6 +449,10 @@ jobs:
|
||||
steps:
|
||||
- setup_remote_docker
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
|
||||
- docker/check:
|
||||
docker-username: DOCKERHUB_USERNAME
|
||||
docker-password: DOCKERHUB_PASSWORD
|
||||
@ -577,7 +461,7 @@ jobs:
|
||||
equal: [ mainnet, <<parameters.network>> ]
|
||||
steps:
|
||||
- when:
|
||||
condition: <parameters.push>>
|
||||
condition: <<parameters.push>>
|
||||
steps:
|
||||
- docker/build:
|
||||
image: filecoin/<<parameters.image>>
|
||||
@ -588,8 +472,8 @@ jobs:
|
||||
command: |
|
||||
docker push filecoin/<<parameters.image>>:<<parameters.channel>>
|
||||
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA}"
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA}"
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
|
||||
@ -618,8 +502,8 @@ jobs:
|
||||
command: |
|
||||
docker push filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>>
|
||||
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA}"-<<parameters.network>>
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA}"-<<parameters.network>>
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
|
||||
@ -632,54 +516,61 @@ jobs:
|
||||
image: filecoin/<<parameters.image>>
|
||||
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
|
||||
|
||||
publish-packer-snap:
|
||||
description: build packer image with snap. mainnet only.
|
||||
executor:
|
||||
name: packer
|
||||
steps:
|
||||
- checkout
|
||||
- packer_build:
|
||||
template: tools/packer/lotus-snap.pkr.hcl
|
||||
|
||||
workflows:
|
||||
version: 2.1
|
||||
ci:
|
||||
jobs:
|
||||
- build
|
||||
- lint-all:
|
||||
concurrency: "16" # expend all docker 2xlarge CPUs.
|
||||
- mod-tidy-check
|
||||
- gofmt
|
||||
- gen-check
|
||||
- docs-check
|
||||
requires:
|
||||
- build
|
||||
- mod-tidy-check:
|
||||
requires:
|
||||
- build
|
||||
- gofmt:
|
||||
requires:
|
||||
- build
|
||||
- gen-check:
|
||||
requires:
|
||||
- build
|
||||
- docs-check:
|
||||
requires:
|
||||
- build
|
||||
|
||||
[[- range $file := .ItestFiles -]]
|
||||
[[ with $name := $file | stripSuffix ]]
|
||||
- test:
|
||||
name: test-itest-[[ $name ]]
|
||||
requires:
|
||||
- build
|
||||
suite: itest-[[ $name ]]
|
||||
target: "./itests/[[ $file ]]"
|
||||
[[ end ]]
|
||||
[[- end -]]
|
||||
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
|
||||
executor: golang-2xl
|
||||
[[- end]]
|
||||
[[- end ]][[- end]]
|
||||
|
||||
[[range $suite, $pkgs := .UnitSuites]]
|
||||
[[- range $suite, $pkgs := .UnitSuites]]
|
||||
- test:
|
||||
name: test-[[ $suite ]]
|
||||
requires:
|
||||
- build
|
||||
suite: utest-[[ $suite ]]
|
||||
target: "[[ $pkgs ]]"
|
||||
[[if eq $suite "unit-cli"]]get-params: true[[end]]
|
||||
[[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
|
||||
[[- end]]
|
||||
- test:
|
||||
go-test-flags: "-run=TestMulticoreSDR"
|
||||
requires:
|
||||
- build
|
||||
suite: multicore-sdr-check
|
||||
target: "./storage/sealer/ffiwrapper"
|
||||
proofs-log-test: "1"
|
||||
- test-conformance:
|
||||
requires:
|
||||
- build
|
||||
suite: conformance
|
||||
target: "./conformance"
|
||||
- test-conformance:
|
||||
name: test-conformance-bleeding-edge
|
||||
suite: conformance-bleeding-edge
|
||||
target: "./conformance"
|
||||
vectors-branch: specs-actors-v7
|
||||
|
||||
release:
|
||||
jobs:
|
||||
@ -734,51 +625,6 @@ workflows:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-appimage:
|
||||
name: "Build AppImage"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish:
|
||||
name: "Publish AppImage"
|
||||
appimage: true
|
||||
requires:
|
||||
- "Build AppImage"
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
[[- range .SnapNames]]
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft ([[.]] / stable)"
|
||||
channel: stable
|
||||
snap-name: [[.]]
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+$/
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft ([[.]] / candidate)"
|
||||
channel: candidate
|
||||
snap-name: [[.]]
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+-rc\d+$/
|
||||
[[- end]]
|
||||
[[- range .Networks]]
|
||||
- build-docker:
|
||||
name: "Docker push (lotus-all-in-one / stable / [[.]])"
|
||||
@ -881,12 +727,6 @@ workflows:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
[[- range .SnapNames]]
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft ([[.]] / edge)"
|
||||
channel: edge
|
||||
snap-name: [[.]]
|
||||
[[- end]]
|
||||
[[- range .Networks]]
|
||||
- build-docker:
|
||||
name: "Docker (lotus-all-in-one / nightly / [[.]])"
|
||||
@ -895,13 +735,3 @@ workflows:
|
||||
network: [[.]]
|
||||
push: true
|
||||
[[- end]]
|
||||
biweekly:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 0 1,15 * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
- publish-packer-snap
|
||||
|
.github/ISSUE_TEMPLATE/bug_report.yml (55 lines changed)
@ -9,15 +9,9 @@ body:
|
||||
options:
|
||||
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
|
||||
required: true
|
||||
- label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions).
|
||||
required: true
|
||||
- label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
|
||||
required: true
|
||||
- label: This is **not** an enhancement request. If it is, please file a [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead.
|
||||
required: true
|
||||
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
|
||||
required: true
|
||||
- label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC (release candidate) for the upcoming release or the dev branch(master), or have an issue updating to any of these.
- label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), the most recent RC (release candidate) for the upcoming release or the dev branch(master), or have an issue updating to any of these.
|
||||
required: true
|
||||
- label: I did not make any code changes to lotus.
|
||||
required: false
|
||||
@ -28,19 +22,11 @@ body:
|
||||
options:
|
||||
- label: lotus daemon - chain sync
|
||||
required: false
|
||||
- label: lotus miner - mining and block production
|
||||
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
|
||||
required: false
|
||||
- label: lotus miner/worker - sealing
|
||||
required: false
|
||||
- label: lotus miner - proving(WindowPoSt)
|
||||
required: false
|
||||
- label: lotus miner/market - storage deal
|
||||
required: false
|
||||
- label: lotus miner/market - retrieval deal
|
||||
required: false
|
||||
- label: lotus miner/market - data transfer
|
||||
required: false
|
||||
- label: lotus client
|
||||
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
|
||||
required: false
|
||||
- label: lotus JSON-RPC API
|
||||
required: false
|
||||
@ -56,22 +42,33 @@ body:
|
||||
description: Enter the output of `lotus version` and `lotus-miner version` if applicable.
|
||||
placeholder: |
|
||||
e.g.
|
||||
Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0
|
||||
Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty
|
||||
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
|
||||
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: ReproSteps
|
||||
attributes:
|
||||
label: Repro Steps
|
||||
description: "Steps to reproduce the behavior"
|
||||
value: |
|
||||
1. Run '...'
|
||||
2. Do '...'
|
||||
3. See error '...'
|
||||
...
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: Description
|
||||
attributes:
|
||||
label: Describe the Bug
|
||||
description: |
|
||||
This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information:
|
||||
* What you were doding when you experienced the bug?
|
||||
* What you were doing when you experienced the bug?
|
||||
* Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
|
||||
* What is the expected behaviour?
|
||||
* For sealing issues, include the output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).
|
||||
* For proving issues, include the output of `lotus-miner proving` info.
|
||||
* For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
@ -83,18 +80,6 @@ body:
|
||||
Please provide debug logs of the problem; remember you can set log level control for:
|
||||
* lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control).
|
||||
* lotus-miner:`lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level
|
||||
If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before further diagnosing the problem.
If you don't provide detailed logs when you raise the issue it will almost certainly be the first request we make before further diagnosing the problem.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: RepoSteps
|
||||
attributes:
|
||||
label: Repo Steps
|
||||
description: "Steps to reproduce the behavior"
|
||||
value: |
|
||||
1. Run '...'
|
||||
2. Do '...'
|
||||
3. See error '...'
|
||||
...
|
||||
validations:
|
||||
required: false
|
||||
required: true
|
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: true
|
||||
contact_links:
|
||||
- name: Ask a question about Lotus or get support
|
||||
url: https://github.com/filecoin-project/lotus/discussions/new/choose
|
||||
about: Ask a question or request support for using Lotus
|
||||
- name: Filecoin protocol feature or enhancement
|
||||
url: https://github.com/filecoin-project/FIPs/discussions/new/choose
|
||||
about: Write a discussion in the Filecoin Improvement Proposal repo
|
.github/ISSUE_TEMPLATE/enhancement.yml (34 lines changed)
@ -7,13 +7,7 @@ body:
|
||||
label: Checklist
|
||||
description: Please check off the following boxes before continuing to create an improvement suggestion!
|
||||
options:
|
||||
- label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
|
||||
required: true
|
||||
- label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
|
||||
required: true
|
||||
- label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
|
||||
required: true
|
||||
- label: I **have** a specific, actionable, and well motivated improvement to propose.
|
||||
- label: I **have** a specific, actionable, and well motivated improvement to an existing lotus feature.
|
||||
required: true
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
@ -22,19 +16,11 @@ body:
|
||||
options:
|
||||
- label: lotus daemon - chain sync
|
||||
required: false
|
||||
- label: lotus miner - mining and block production
|
||||
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
|
||||
required: false
|
||||
- label: lotus miner/worker - sealing
|
||||
required: false
|
||||
- label: lotus miner - proving(WindowPoSt)
|
||||
required: false
|
||||
- label: lotus miner/market - storage deal
|
||||
required: false
|
||||
- label: lotus miner/market - retrieval deal
|
||||
required: false
|
||||
- label: lotus miner/market - data transfer
|
||||
required: false
|
||||
- label: lotus client
|
||||
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
|
||||
required: false
|
||||
- label: lotus JSON-RPC API
|
||||
required: false
|
||||
@ -45,9 +31,17 @@ body:
|
||||
- type: textarea
|
||||
id: request
|
||||
attributes:
|
||||
label: Improvement Suggestion
|
||||
description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement?
|
||||
placeholder: Ex. Currently lotus... However, as a storage provider, I'd like...
|
||||
label: Enhancement Suggestion
|
||||
description: A clear and concise description of the suggested enhancement?
|
||||
placeholder: Ex. Currently lotus... However it would be great if [enhancement] was implemented... With the ability to...
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: request
|
||||
attributes:
|
||||
label: Use-Case
|
||||
description: How would this enhancement help you?
|
||||
placeholder: Ex. With the [enhancement] node operators would be able to... For Storage Providers it would enable...
|
||||
validations:
|
||||
required: true
|
||||
|
||||
|
.github/ISSUE_TEMPLATE/feature_request.yml (17 lines changed)
@ -7,8 +7,6 @@ body:
|
||||
label: Checklist
|
||||
description: Please check off the following boxes before continuing to create a new feature request!
|
||||
options:
|
||||
- label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
|
||||
required: true
|
||||
- label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
|
||||
required: true
|
||||
- label: I **have** a specific, actionable, and well motivated feature request to propose.
|
||||
@ -20,19 +18,11 @@ body:
|
||||
options:
|
||||
- label: lotus daemon - chain sync
|
||||
required: false
|
||||
- label: lotus miner - mining and block production
|
||||
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
|
||||
required: false
|
||||
- label: lotus miner/worker - sealing
|
||||
required: false
|
||||
- label: lotus miner - proving(WindowPoSt)
|
||||
required: false
|
||||
- label: lotus miner/market - storage deal
|
||||
required: false
|
||||
- label: lotus miner/market - retrieval deal
|
||||
required: false
|
||||
- label: lotus miner/market - data transfer
|
||||
required: false
|
||||
- label: lotus client
|
||||
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
|
||||
required: false
|
||||
- label: lotus JSON-RPC API
|
||||
required: false
|
||||
@ -56,7 +46,7 @@ body:
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: alternates
|
||||
id: alternatives
|
||||
attributes:
|
||||
label: Describe alternatives you've considered
|
||||
description: A clear and concise description of any alternative solutions or features you've considered.
|
||||
@ -69,4 +59,3 @@ body:
|
||||
description: Add any other context, design docs or screenshots about the feature request here.
|
||||
validations:
|
||||
required: false
|
||||
|
||||
|
.github/ISSUE_TEMPLATE/service_developer_bug_report.yml (new file, 83 lines)
@ -0,0 +1,83 @@
|
||||
name: "Bug Report - developer/service provider"
|
||||
description: "Bug report template about FEVM/FVM for developers/service providers"
|
||||
labels: [need/triage, kind/bug, area/fevm]
|
||||
body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Checklist
|
||||
description: Please check off the following boxes before continuing to file a bug report!
|
||||
options:
|
||||
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
|
||||
required: true
|
||||
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
|
||||
required: true
|
||||
- label: I did not make any code changes to lotus.
|
||||
required: false
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Lotus component
|
||||
description: Please select the lotus component you are filing a bug for
|
||||
options:
|
||||
- label: lotus Ethereum RPC
|
||||
required: false
|
||||
- label: lotus FVM - Lotus FVM interactions
|
||||
required: false
|
||||
- label: FEVM tooling
|
||||
required: false
|
||||
- label: Other
|
||||
required: false
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: Lotus Version
|
||||
render: text
|
||||
description: Enter the output of `lotus version` if applicable.
|
||||
placeholder: |
|
||||
e.g.
|
||||
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
|
||||
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: repro
|
||||
attributes:
|
||||
label: Repro Steps
|
||||
description: "Steps to reproduce the behavior"
|
||||
value: |
|
||||
1. Run '...'
|
||||
2. Do '...'
|
||||
3. See error '...'
|
||||
...
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: Description
|
||||
attributes:
|
||||
label: Describe the Bug
|
||||
description: |
|
||||
This is where you get to tell us what went wrong, when doing so, please try to provide a clear and concise description of the bug with all related information:
|
||||
* What you were doing when you experienced the bug? What are you trying to build?
|
||||
* Any *error* messages and logs you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
|
||||
* What is the expected behaviour? Links to the actual code?
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: toolingInfo
|
||||
attributes:
|
||||
label: Tooling
|
||||
render: text
|
||||
description: |
|
||||
What kind of tooling are you using:
|
||||
* Are you using ether.js, Alchemy, Hardhat, etc.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: extraInfo
|
||||
attributes:
|
||||
label: Configuration Options
|
||||
render: text
|
||||
description: |
|
||||
Please provide your updated FEVM-related configuration options, or custom environment variables related to Lotus FEVM
|
||||
* lotus: use `lotus config updated` to get your configuration options, and copy the [FEVM] section
|
||||
validations:
|
||||
required: true
|
.github/workflows/codeql-analysis.yml (20 lines changed)
@ -13,10 +13,14 @@ name: "CodeQL"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
branches:
|
||||
- master
|
||||
- 'release/*'
|
||||
pull_request:
|
||||
# The branches below must be a subset of the branches above
|
||||
branches: [ master ]
|
||||
branches:
|
||||
- master
|
||||
- 'release/*'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
@ -33,17 +37,17 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-go@v1
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18.8'
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
uses: github/codeql-action/init@v2
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
languages: go
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
@ -52,7 +56,7 @@ jobs:
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
@ -66,4 +70,4 @@ jobs:
|
||||
# make release
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
||||
uses: github/codeql-action/analyze@v2
|
||||
|
.gitignore (8 lines changed)
@ -1,6 +1,3 @@
|
||||
/AppDir
|
||||
/appimage-builder-cache
|
||||
*.AppImage
|
||||
/lotus
|
||||
/lotus-miner
|
||||
/lotus-worker
|
||||
@ -50,3 +47,8 @@ build/builtin-actors/v*
|
||||
build/builtin-actors/*.car
|
||||
|
||||
dist/
|
||||
|
||||
|
||||
# The following files are checked into git and result
|
||||
# in dirty git state if removed from the docker context
|
||||
!extern/filecoin-ffi/rust/filecoin.pc
|
||||
|
@ -65,6 +65,7 @@ archives:
|
||||
- id: primary
|
||||
format: tar.gz
|
||||
wrap_in_directory: true
|
||||
name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||
files:
|
||||
# this is a dumb but required hack so it doesn't include the default files
|
||||
# https://github.com/goreleaser/goreleaser/issues/602
|
||||
@ -105,4 +106,4 @@ checksum:
|
||||
disable: true
|
||||
|
||||
snapshot:
|
||||
name_template: "{{ .Tag }}"
|
||||
name_template: "{{ .Version }}"
|
||||
|
@ -1 +0,0 @@
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0" y="0" viewBox="0 0 127 127" xml:space="preserve" enable-background="new 0 0 127 127"><style type="text/css">.st0{fill:#00d2d6}.st1{fill:#fff}</style><g><path class="st0" d="M63.5,127C28.5,127.1-0.2,98.4,0,63.2C0.2,28.3,28.6-0.2,63.9,0c34.8,0.2,63.3,28.7,63.1,64 C126.7,98.7,98.5,127.1,63.5,127z M71.4,57.6c5.5,0.8,11,1.5,16.5,2.3c0.5-1.7,0.9-3.1,1.3-4.7c-5.7-0.8-11.2-1.7-17.1-2.5 c2-7,3.7-13.7,5.8-20.2c0.7-2.2,2.3-4.2,3.9-5.9c2.1-2.2,5-1.7,6.8,0.7c0.7,1,1.4,2.1,2.3,2.9c1.1,1.1,2.8,1.6,4,0.6 c0.8-0.7,0.7-2.4,0.8-3.6c0-0.5-0.6-1.1-1-1.6c-2-2.3-4.7-3.1-7.5-3.2c-6.3-0.3-10.9,3-14.5,7.8c-3.5,4.8-5.1,10.5-6.8,16.2 c-0.5,1.6-0.9,3.3-1.4,5.1c-6.2-0.9-12.1-1.7-18.2-2.6c-0.2,1.6-0.4,3.2-0.6,4.8c6,0.9,11.8,1.8,17.8,2.7c-0.8,3.4-1.5,6.5-2.3,9.7 c-5.8-0.8-11.4-1.6-17-2.4c-0.2,1.8-0.4,3.2-0.6,4.8c5.6,0.9,11,1.7,16.5,2.5c0,0.6,0.1,1,0,1.3c-1.7,7.4-3.4,14.8-5.3,22.2 c-0.9,3.5-2.4,6.9-5.3,9.3c-2.4,2-5,1.7-6.8-0.8c-0.8-1.1-1.5-2.5-2.6-3.3c-0.8-0.6-2.5-0.9-3.1-0.5c-0.9,0.7-1.5,2.2-1.4,3.3 c0.1,1,1,2.2,1.9,2.8c3,2.3,6.5,2.6,10,1.9c5.9-1.2,10.1-4.9,12.7-10.1c2-4.1,3.6-8.5,5-12.9c1.3-4,2.2-8,3.3-12.2 c5.8,0.8,11.5,1.7,17.3,2.5c0.5-1.7,0.9-3.2,1.4-4.8c-6.1-0.9-11.9-1.7-17.7-2.6C70.1,64,70.7,60.9,71.4,57.6z"/><path class="st1" d="M71.4,57.6c-0.7,3.3-1.3,6.4-2,9.8c5.9,0.9,11.7,1.7,17.7,2.6c-0.5,1.6-0.9,3.1-1.4,4.8 c-5.8-0.8-11.5-1.7-17.3-2.5c-1.1,4.2-2,8.3-3.3,12.2c-1.4,4.4-3,8.7-5,12.9c-2.6,5.2-6.8,8.9-12.7,10.1c-3.5,0.7-7,0.4-10-1.9 c-0.9-0.7-1.8-1.8-1.9-2.8c-0.1-1.1,0.5-2.7,1.4-3.3c0.6-0.5,2.3-0.1,3.1,0.5c1.1,0.8,1.8,2.1,2.6,3.3c1.8,2.5,4.4,2.9,6.8,0.8 c2.9-2.5,4.4-5.8,5.3-9.3c1.9-7.3,3.6-14.8,5.3-22.2c0.1-0.3,0-0.7,0-1.3c-5.4-0.8-10.8-1.7-16.5-2.5c0.2-1.6,0.4-3,0.6-4.8 c5.6,0.8,11.1,1.6,17,2.4c0.8-3.2,1.5-6.4,2.3-9.7c-6-0.9-11.7-1.8-17.8-2.7c0.2-1.6,0.4-3.2,0.6-4.8c6.1,0.9,12,1.7,18.2,2.6 c0.5-1.8,0.9-3.5,1.4-5.1c1.7-5.6,3.2-11.3,6.8-16.2c3.6-4.9,8.1-8.1,14.5-7.8c2.8,0.1,5.5,0.9,7.5,3.2c0.4,0.5,1,1.1,1,1.6 c-0.1,1.2,0,2.9-0.8,3.6c-1.1,1.1-2.8,0.5-4-0.6c-0.9-0.9-1.6-1.9-2.3-2.9c-1.8-2.4-4.7-2.9-6.8-0.7c-1.6,1.7-3.2,3.7-3.9,5.9 C75.7,39.4,74,46,72,53c5.9,0.9,11.4,1.7,17.1,2.5c-0.5,1.6-0.9,3.1-1.3,4.7C82.3,59.1,76.9,58.3,71.4,57.6z"/></g></svg>
|
@ -1,71 +0,0 @@
|
||||
version: 1
|
||||
AppDir:
|
||||
path: ./AppDir
|
||||
app_info:
|
||||
id: io.filecoin.lotus
|
||||
name: Lotus
|
||||
icon: icon
|
||||
version: latest
|
||||
exec: usr/bin/lotus
|
||||
exec_args: $@
|
||||
apt:
|
||||
arch: amd64
|
||||
allow_unauthenticated: true
|
||||
sources:
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal main restricted
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates main restricted
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal universe
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates universe
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal multiverse
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates multiverse
|
||||
- sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-backports main restricted
|
||||
universe multiverse
|
||||
- sourceline: deb http://security.ubuntu.com/ubuntu focal-security main restricted
|
||||
- sourceline: deb http://security.ubuntu.com/ubuntu focal-security universe
|
||||
- sourceline: deb http://security.ubuntu.com/ubuntu focal-security multiverse
|
||||
- sourceline: deb https://cli-assets.heroku.com/apt ./
|
||||
- sourceline: deb http://ppa.launchpad.net/openjdk-r/ppa/ubuntu focal main
|
||||
- sourceline: deb http://ppa.launchpad.net/git-core/ppa/ubuntu focal main
|
||||
- sourceline: deb http://archive.canonical.com/ubuntu focal partner
|
||||
include:
|
||||
- ocl-icd-libopencl1
|
||||
- libhwloc15
|
||||
exclude: []
|
||||
files:
|
||||
include:
|
||||
- /usr/lib/x86_64-linux-gnu/libgcc_s.so.1
|
||||
- /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
|
||||
- /usr/lib/x86_64-linux-gnu/libm-2.31.so
|
||||
- /usr/lib/x86_64-linux-gnu/libdl-2.31.so
|
||||
- /usr/lib/x86_64-linux-gnu/libc-2.31.so
|
||||
- /usr/lib/x86_64-linux-gnu/libudev.so.1.6.17
|
||||
exclude:
|
||||
- usr/share/man
|
||||
- usr/share/doc/*/README.*
|
||||
- usr/share/doc/*/changelog.*
|
||||
- usr/share/doc/*/NEWS.*
|
||||
- usr/share/doc/*/TODO.*
|
||||
test:
|
||||
fedora:
|
||||
image: appimagecrafters/tests-env:fedora-30
|
||||
command: ./AppRun
|
||||
use_host_x: false
|
||||
debian:
|
||||
image: appimagecrafters/tests-env:debian-stable
|
||||
command: ./AppRun
|
||||
use_host_x: false
|
||||
arch:
|
||||
image: appimagecrafters/tests-env:archlinux-latest
|
||||
command: ./AppRun
|
||||
use_host_x: false
|
||||
centos:
|
||||
image: appimagecrafters/tests-env:centos-7
|
||||
command: ./AppRun
|
||||
use_host_x: false
|
||||
ubuntu:
|
||||
image: appimagecrafters/tests-env:ubuntu-xenial
|
||||
command: ./AppRun
|
||||
use_host_x: false
|
||||
AppImage:
|
||||
arch: x86_64
|
||||
|
CHANGELOG.md (440 lines changed)
@ -1,5 +1,445 @@
|
||||
# Lotus changelog
|
||||
|
||||
# UNRELEASED
|
||||
|
||||
## Execution Trace Format Changes
|
||||
|
||||
Execution traces (returned from `lotus state exec-trace`, `lotus state replay`, etc.), has changed to account for changes introduced by the FVM. Specifically:
|
||||
|
||||
- The `Msg` field no longer matches the Filecoin message format as many of the message fields didn't make sense in on-chain sub-calls. Instead, it now has the fields `To`, `From`, `Value`, `Method`, `Params`, and `ParamsCodec` where `ParamsCodec` is a new field indicating the IPLD codec of the parameters.
|
||||
- Importantly, the `Msg.CID` field has been removed. This field is still present in top-level invocation results, just not inside the execution trace itself.
|
||||
- The `MsgRct` field no longer includes a `GasUsed` field and now has a `ReturnCodec` field to indicating the IPLD codec of the return value.
|
||||
- The `Error` and `Duration` fields have been removed as these are not set by the FVM. The top-level message "invocation result" retains the `Error` and `Duration` fields, they've only been removed from the trace itself.
|
||||
- Gas Charges no longer include "virtual" gas fields (those starting with `v...`) or source location information (`loc`) as neither field is set by the FVM.
|
||||
|
||||
A note on "codecs": FVM parameters and return values are IPLD blocks where the "codec" specifies the data encoding. The codec will generally be one of:

- `0x51`, `0x71` - CBOR or DagCBOR. You should generally treat these as equivalent.
- `0x55` - Raw bytes.
- `0x00` - Nothing. If the codec is `0x00`, the parameter and/or return value should be empty and should be treated as "void" (not specified).
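As a rough illustration of how a consumer of the new trace format might branch on these codec values, here is a minimal Go sketch. The constant names and the trimmed-down message struct are assumptions made for the example, not the actual lotus types.

```go
package main

import "fmt"

// Codec values as they appear in ParamsCodec/ReturnCodec. Note the JSON
// examples below show them in decimal, e.g. 81 == 0x51.
const (
	codecNone    = 0x00 // no parameters / return value ("void")
	codecCBOR    = 0x51 // CBOR
	codecDagCBOR = 0x71 // DagCBOR; treat the same as CBOR
	codecRaw     = 0x55 // raw bytes
)

// traceMsg is a trimmed stand-in for the trace's Msg object, not the real lotus type.
type traceMsg struct {
	Params      []byte
	ParamsCodec uint64
}

func describeParams(m traceMsg) string {
	switch m.ParamsCodec {
	case codecNone:
		return "void (no parameters)"
	case codecCBOR, codecDagCBOR:
		return fmt.Sprintf("CBOR-encoded parameters, %d bytes", len(m.Params))
	case codecRaw:
		return fmt.Sprintf("raw byte parameters, %d bytes", len(m.Params))
	default:
		return fmt.Sprintf("unknown codec 0x%x", m.ParamsCodec)
	}
}

func main() {
	fmt.Println(describeParams(traceMsg{Params: []byte{0x82, 0x01, 0x02}, ParamsCodec: codecDagCBOR}))
}
```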
<details>
|
||||
<summary>
|
||||
Old <code>ExecutionTrace</code>:
|
||||
</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"Msg": {
|
||||
"Version": 0,
|
||||
"To": "f01234",
|
||||
"From": "f04321",
|
||||
"Nonce": 1,
|
||||
"Value": "0",
|
||||
"GasLimit": 0,
|
||||
"GasFeeCap": "1234",
|
||||
"GasPremium": "1234",
|
||||
"Method": 42,
|
||||
"Params": "<base64-data-or-null>",
|
||||
"CID": {
|
||||
"/": "bafyxyz....."
|
||||
},
|
||||
},
|
||||
"MsgRct": {
|
||||
"ExitCode": 0,
|
||||
"Return": "<base64-data-or-null>",
|
||||
"GasUsed": 12345,
|
||||
},
|
||||
"Error": "",
|
||||
"Duration": 568191845,
|
||||
"GasCharges": [
|
||||
{
|
||||
"Name": "OnMethodInvocation",
|
||||
"loc": null,
|
||||
"tg": 23856,
|
||||
"cg": 23856,
|
||||
"sg": 0,
|
||||
"vtg": 0,
|
||||
"vcg": 0,
|
||||
"vsg": 0,
|
||||
"tt": 0
|
||||
},
|
||||
{
|
||||
"Name": "wasm_exec",
|
||||
"loc": null,
|
||||
"tg": 1764,
|
||||
"cg": 1764,
|
||||
"sg": 0,
|
||||
"vtg": 0,
|
||||
"vcg": 0,
|
||||
"vsg": 0,
|
||||
"tt": 0
|
||||
},
|
||||
{
|
||||
"Name": "OnSyscall",
|
||||
"loc": null,
|
||||
"tg": 14000,
|
||||
"cg": 14000,
|
||||
"sg": 0,
|
||||
"vtg": 0,
|
||||
"vcg": 0,
|
||||
"vsg": 0,
|
||||
"tt": 0
|
||||
},
|
||||
],
|
||||
"Subcalls": [
|
||||
{
|
||||
"Msg": { },
|
||||
"MsgRct": { },
|
||||
"Error": "",
|
||||
"Duration": 1235,
|
||||
"GasCharges": [],
|
||||
"Subcalls": [],
|
||||
},
|
||||
]
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>
|
||||
New <code>ExecutionTrace</code>:
|
||||
</summary>
|
||||
|
||||
```json
|
||||
{
|
||||
"Msg": {
|
||||
"To": "f01234",
|
||||
"From": "f04321",
|
||||
"Value": "0",
|
||||
"Method": 42,
|
||||
"Params": "<base64-data-or-null>",
|
||||
"ParamsCodec": 81
|
||||
},
|
||||
"MsgRct": {
|
||||
"ExitCode": 0,
|
||||
"Return": "<base64-data-or-null>",
|
||||
"ReturnCodec": 81
|
||||
},
|
||||
"GasCharges": [
|
||||
{
|
||||
"Name": "OnMethodInvocation",
|
||||
"loc": null,
|
||||
"tg": 23856,
|
||||
"cg": 23856,
|
||||
"tt": 0
|
||||
},
|
||||
{
|
||||
"Name": "wasm_exec",
|
||||
"loc": null,
|
||||
"tg": 1764,
|
||||
"cg": 1764,
|
||||
"sg": 0,
|
||||
"tt": 0
|
||||
},
|
||||
{
|
||||
"Name": "OnSyscall",
|
||||
"loc": null,
|
||||
"tg": 14000,
|
||||
"cg": 14000,
|
||||
"sg": 0,
|
||||
"tt": 0
|
||||
},
|
||||
],
|
||||
"Subcalls": [
|
||||
{
|
||||
"Msg": { },
|
||||
"MsgRct": { },
|
||||
"GasCharges": [],
|
||||
"Subcalls": [],
|
||||
},
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
# v1.20.3 / 2023-03-09
|
||||
|
||||
A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This release only includes #10439 to fix that mistake. v1.20.2 is retracted - Please skip v1.20.2 and **only** update to v1.20.3!!!
|
||||
|
||||
## Changelog
|
||||
> compare to v1.20.1
|
||||
|
||||
This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run ETH RPC service and an optional release for Storage Providers.
|
||||
|
||||
## Bug fixes
|
||||
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413
|
||||
- fix: eth API: return correct txIdx around null blocks #10419
|
||||
- fix: Eth API: make block parameter parsing sounder. #10427
|
||||
|
||||
## Improvement
|
||||
- feat: Lotus Gateway: Add missing methods - master #10420
|
||||
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416
|
||||
- We recommend storage providers update their nodes to this patch; it will help improve the experience of developers who use Ethereum tooling.
|
||||
|
||||
# v1.20.2 / 2023-03-09
|
||||
|
||||
This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run ETH RPC service and an optional release for Storage Providers.
|
||||
|
||||
## Bug fixes
|
||||
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413
|
||||
- fix: eth API: return correct txIdx around null blocks #10419
|
||||
- fix: Eth API: make block parameter parsing sounder. #10427
|
||||
|
||||
## Improvement
|
||||
- feat: Lotus Gateway: Add missing methods - master #10420
|
||||
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416
|
||||
- We recommend storage providers update their nodes to this patch; it will help improve the experience of developers who use Ethereum tooling.
|
||||
|
||||
# v1.20.1 / 2023-03-06
|
||||
|
||||
This an optional patch releases for node operators/API service providers that run ETH RPC service.
|
||||
|
||||
## Bug fixes
|
||||
- fix: EthAPI: Correctly get parent hash #10389
|
||||
- fix: EthAPI: Make newEthBlockFromFilecoinTipSet faster and correct #10380
|
||||
- fix: state: short-circuit genesis state computation
|
||||
|
||||
# 1.20.0 / 2023-02-28
|
||||
|
||||
This is a MANDATORY release of Lotus that delivers the [Hygge network upgrade](https://github.com/filecoin-project/community/discussions/74?sort=top#discussioncomment-4313888), introducing Filecoin network version 18. The centerpiece of the upgrade is the introduction of the [Filecoin Virtual Machine (FVM)’s Milestone 2.1](https://fvm.filecoin.io/), which will allow for EVM-compatible contracts to be deployed on the Filecoin network. This upgrade delivers user-programmability to the Filecoin network for the first time!
|
||||
|
||||
The Filecoin mainnet is scheduled to upgrade to nv18 at epoch 2683348, on March 14th at 2023-03-14T15:14:00Z. All node operators, including storage providers, must upgrade to this release before that time. Storage providers must update their daemons, miners, market and worker(s)/boost.
|
||||
At the upgrade, a short migration will run that converts code actors v9 code CIDs to v10 CIDs, and installs the new Ethereum Address Manager singleton (see below). This is expected to be a lightweight migration that causes no service disruption.
|
||||
|
||||
The Hygge upgrade introduces the following Filecoin Improvement Proposals (FIPs), delivered in FVM3 (see FVM [v3.0.0](https://github.com/filecoin-project/ref-fvm/pull/1683)) and builtin-actors v10 (see actors [v10.0.0](https://github.com/filecoin-project/builtin-actors/releases/tag/v10.0.0)):
|
||||
|
||||
- [FIP-0048](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0048.md): f4 Address Class
|
||||
- [FIP-0049](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md): Actor events
|
||||
- [FIP-0050](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0050.md): API between user-programmed actors and built-in actors
|
||||
- [FIP-0054](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0054.md): Filecoin EVM runtime (FEVM)
|
||||
- [FIP-0055](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0055.md): Supporting Ethereum Accounts, Addresses, and Transactions
|
||||
- [FIP-0057](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0057.md): Update gas charging schedule and system limits for FEVM
|
||||
|
||||
## Filecoin Ethereum Virtual Machine (FEVM)
|
||||
|
||||
The Filecoin Ethereum Virtual Machine (FEVM) is built on top of the WASM-based execution environment introduced in the Skyr v16 upgrade. The chief feature introduced is the ability for anyone participating in the Filecoin network to deploy their own EVM-compatible contracts onto the blockchain, and invoke them as appropriate.
|
||||
|
||||
## New Built-in Actors
|
||||
|
||||
The FEVM is principally delivered through the introduction of **the new [EVM actor](https://github.com/filecoin-project/builtin-actors/tree/master/actors/evm)**. This actor “represents” smart contracts on the Filecoin network, and includes an interpreter that implements all EVM opcodes as their Filecoin equivalents, and translates state I/O operations to be compatible with Filecoin’s IPLD-based data model. For more on the EVM actors, please see [FIP-0054](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0054.md).
|
||||
|
||||
The creation of EVM actors is managed by **the new** [Ethereum Address Manager actor (EAM)](https://github.com/filecoin-project/builtin-actors/tree/master/actors/eam), a singleton that is invoked in order to deploy EVM actors. In order to make usage of the FEVM as seamless as possible for users familiar with the Ethereum ecosystem, this upgrades also introduces **a dedicated actor to serve as “[Ethereum Accounts](https://github.com/filecoin-project/builtin-actors/tree/master/actors/ethaccount)”**. This actor exists to allow for secp keys to be used in the Ethereum addressing scheme. **The last new built-in actor introduced is [the Placeholder actor](https://github.com/filecoin-project/builtin-actors/tree/master/actors/placeholder)**, a thin “shell” of an actor that can transform into either EVM or EthAccount actors. For more on the EAM, EthAccount, and Placeholder actors, please see [FIP-0055](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0055.md).
|
||||
|
||||
### v10 Built-in actor bundles
|
||||
|
||||
Bundles for all networks (mainnet, calibnet, etc.) are included in the lotus source tree (`build/actors/`) and embedded on build, for v10 actors you can find it [here](https://github.com/filecoin-project/lotus/blob/master/build/actors/v10.tar.zst).
|
||||
Reminder: Lotus verifies that the bundle CIDs are the right ones upon build & upgrade against the values in `build/builtin_actors_gen.go`, according to the network you are building. You may also check the bundle manifest CID matches the bundle gen-ed values by running `lotus state actor-cids --network-version 18`.
|
||||
|
||||
The manifest CID & full list of actor code CIDs for nv18 using [actor v10](https://github.com/filecoin-project/builtin-actors/releases/tag/v10.0.0) is:
"_manifest": "bafy2bzacecsuyf7mmvrhkx2evng5gnz5canlnz2fdlzu2lvcgptiq2pzuovos"
"account": "bafk2bzaceampw4romta75hyz5p4cqriypmpbgnkxncgxgqn6zptv5lsp2w2bo"
"cron": "bafk2bzacedcbtsifegiu432m5tysjzkxkmoczxscb6hqpmrr6img7xzdbbs2g"
"datacap": "bafk2bzacealj5uk7wixhvk7l5tnredtelralwnceafqq34nb2lbylhtuyo64u"
"eam": "bafk2bzacedrpm5gbleh4xkyo2jvs7p5g6f34soa6dpv7ashcdgy676snsum6g"
"ethaccount": "bafk2bzaceaqoc5zakbhjxn3jljc4lxnthllzunhdor7sxhwgmskvc6drqc3fa"
"evm": "bafk2bzaceahmzdxhqsm7cu2mexusjp6frm7r4kdesvti3etv5evfqboos2j4g"
"init": "bafk2bzaced2f5rhir3hbpqbz5ght7ohv2kgj42g5ykxrypuo2opxsup3ykwl6"
"multisig": "bafk2bzaceduf3hayh63jnl4z2knxv7cnrdenoubni22fxersc4octlwpxpmy4"
"paymentchannel": "bafk2bzaceartlg4mrbwgzcwric6mtvyawpbgx2xclo2vj27nna57nxynf3pgc"
"placeholder": "bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"
"reward": "bafk2bzacebnhtaejfjtzymyfmbdrfmo7vgj3zsof6zlucbmkhrvcuotw5dxpq"
"storagemarket": "bafk2bzaceclejwjtpu2dhw3qbx6ow7b4pmhwa7ocrbbiqwp36sq5yeg6jz2bc"
"storageminer": "bafk2bzaced4h7noksockro7glnssz2jnmo2rpzd7dvnmfs4p24zx3h6gtx47s"
"storagepower": "bafk2bzacec4ay4crzo73ypmh7o3fjendhbqrxake46bprabw67fvwjz5q6ixq"
"system": "bafk2bzacedakk5nofebyup4m7nvx6djksfwhnxzrfuq4oyemhpl4lllaikr64"
"verifiedregistry": "bafk2bzacedfel6edzqpe5oujno7fog4i526go4dtcs6vwrdtbpy2xq6htvcg6"
## Node Operators
The FVM has been running in lotus since v1.16.0, and the new FEVM does not increase the node hardware spec requirements.

With FEVM on Filecoin, we aim to provide full compatibility with the existing EVM ecosystem and its tooling out of the box.
Consequently, lotus now provides a full set of [Ethereum-styled APIs](https://github.com/filecoin-project/lotus/blob/release/v1.20.0/node/impl/full/eth.go) for developers and token holders to interact with the Filecoin network as well.
For full documentation on this new tooling, please see the [Lotus docs website](https://lotus.filecoin.io/lotus/configure/ethereum-rpc/).
**Enabling Ethereum JSON RPC API**
Note that Ethereum APIs are only supported in the lotus v1 API, meaning that any node operator who wants to enable Eth API services must use the v1 API rather than the v0 API. To enable Eth RPC, simply set `EnableEthRPC` to `true` in your node's `config.toml`, or set the env var `LOTUS_FEVM_ENABLEETHRPC` to `1` before starting your lotus node.
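
For reference, a minimal `config.toml` sketch is shown below. It assumes these options live under the `[Fevm]` section (the `LOTUS_FEVM_ENABLEETHRPC` environment variable suggests this layout), and the `EthTxHashMappingLifetimeDays` value is purely illustrative; check the Lotus configuration docs for the authoritative names and defaults.

```toml
# config.toml -- minimal sketch, assuming the Eth RPC options sit under [Fevm]
[Fevm]
  # Expose the Ethereum JSON-RPC API (v1 API endpoint only)
  EnableEthRPC = true
  # Illustrative value: how many days of Eth tx hash <> message CID mappings to keep
  EthTxHashMappingLifetimeDays = 30
```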
**Eth tx hash and Filecoin message CID**
Most of the Eth APIs take Eth accounts and tx hashes as input; these start with `0x`, which is what Ethereum tooling supports. In Filecoin, however, account addresses start with `f` (`f410` specifically for Eth accounts on Filecoin) and messages are identified by CIDs. To enable a smooth developer experience, Lotus internally converts between Ethereum addresses and Filecoin account addresses as needed. In addition, lotus keeps an Eth tx hash <> Filecoin message CID mapping in a SQLite database as the node sees FEVM messages. The database is initialized and the mappings are populated automatically in `~/<lotus_repo>/sqlite/txhash.db` for any node that has Eth RPC enabled. Node operators can configure how many days of historical mappings to keep via `EthTxHashMappingLifetimeDays`.
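
For illustration, here is a minimal Go sketch that drives both conversions over the v1 RPC API. It assumes a local node with Eth RPC enabled, a token in `LOTUS_TOKEN`, and that the `ethtypes.ParseEthAddress`/`ethtypes.ParseEthHash` helpers behave as their names suggest; the address and hash literals are placeholders. Treat it as a sketch, not canonical tooling.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"os"

	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

func main() {
	ctx := context.Background()

	// Connect to the lotus v1 API; the Eth methods are only exposed there.
	header := http.Header{"Authorization": []string{"Bearer " + os.Getenv("LOTUS_TOKEN")}}
	full, closer, err := client.NewFullNodeRPCV1(ctx, "http://127.0.0.1:1234/rpc/v1", header)
	if err != nil {
		panic(err)
	}
	defer closer()

	// 0x-style address -> f410 Filecoin address.
	ethAddr, err := ethtypes.ParseEthAddress("0x0000000000000000000000000000000000000001") // placeholder
	if err != nil {
		panic(err)
	}
	filAddr, err := full.EthAddressToFilecoinAddress(ctx, ethAddr)
	if err != nil {
		panic(err)
	}
	fmt.Println("f410 address:", filAddr)

	// Eth tx hash -> Filecoin message CID, served from the txhash.db mapping.
	txHash, err := ethtypes.ParseEthHash("0x0000000000000000000000000000000000000000000000000000000000000001") // placeholder
	if err != nil {
		panic(err)
	}
	msgCid, err := full.EthGetMessageCidByTransactionHash(ctx, &txHash)
	if err != nil {
		panic(err)
	}
	fmt.Println("message CID:", msgCid)
}
```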
**Events**
[FIP-0049 introduces actor events](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md) that can be emitted during message execution and observed externally. An `events.db` is created automatically under `~/<lotus_repo>/sqlite` to store these events if the node has Eth RPC enabled. Node operators can tune event support to their needs via the `Events` configuration options.
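
As a rough illustration of the new filter APIs that sit on top of these events, the Go sketch below installs a log filter, polls it once, and uninstalls it. It assumes an empty `ethtypes.EthFilterSpec` is accepted and simply matches recent events, and that the result type exposes a `Results` slice; both are assumptions, so treat this as a sketch only.

```go
package fevmexample

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

// pollEventLogs installs an unrestricted log filter, polls it once for new
// results, and removes it again. Installed filters persist until uninstalled.
func pollEventLogs(ctx context.Context, node api.FullNode) error {
	filterID, err := node.EthNewFilter(ctx, &ethtypes.EthFilterSpec{}) // assumption: empty spec == no restrictions
	if err != nil {
		return err
	}
	defer node.EthUninstallFilter(ctx, filterID) //nolint:errcheck

	res, err := node.EthGetFilterChanges(ctx, filterID)
	if err != nil {
		return err
	}
	fmt.Printf("got %d new log entries since the last poll\n", len(res.Results)) // assumption: Results field
	return nil
}
```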
Note: all three features are new, and we welcome user feedback. Please create an issue if there are any enhancements you’d like to see!

# 1.19.0 / 2022-12-07
This is an optional feature release of Lotus. It includes the SplitStore beta, the experimental Lotus node cluster feature, as well as numerous enhancements and bugfixes.

## Highlights
### 🟢 SplitStore v2 (Beta) 🟢

Splitstore aims to reduce the node performance impact caused by Filecoin's very large and continuously growing chain datastore by splitting it into a hot and a cold blockstore. You can find more about the Splitstore implementation [here](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md).
Splitstore has three basic modes that node operators can configure according to their needs:
- `discard`: hotstore only; automatically archives out-of-scope objects that are beyond 4 finalities (3600 epochs).
- `universal`: stores all chain data that's beyond 4 finalities into coldstore.
- `messages`: only stores on-chain messages into coldstore.
The `EnableColdStoreAutoPrune` configuration option is being deprecated in this release, as there is only ever one compaction running. We welcome all node operators to try the new feature and let us know [here](https://github.com/filecoin-project/lotus/discussions/9179) if you have any feedback!
There are more configuration options one may set; you can read the full documentation about SplitStore v2 here: https://lotus.filecoin.io/lotus/configure/splitstore/.
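
As a starting point, a minimal config sketch for turning SplitStore on looks like the following; the section and key names assume the standard `[Chainstore]`/`[Chainstore.Splitstore]` layout of the lotus config, so double-check them against the documentation linked above.

```toml
# config.toml -- minimal SplitStore sketch (section/key names assumed, see docs above)
[Chainstore]
  EnableSplitstore = true

  [Chainstore.Splitstore]
    # One of "discard", "universal" or "messages", as described above.
    ColdStoreType = "messages"
```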
### 🧪 Node Cluster (*EXPERIMENTAL*) 🧪
The Lotus HA node cluster feature allows you to run multiple Lotus daemons for the same lotus-miner, increasing resiliency. We welcome all Lotus users to join the early testing of this feature and provide feedback. Please note that this feature is targeted towards more enterprise users of Lotus and requires at least 3 lotus nodes to be set up in a cluster.
Check out the documentation here: https://lotus.filecoin.io/lotus/configure/clusters/
### ⭐️ SnapDeals Enhancements ⭐️
Numerous SnapDeals-related improvements and fixes made it into this release before the code freeze. Some of the highlights of the issues that have been fixed in this feature release are:

- *Unable to snap-up a sector again if something went wrong.* - This has now been fixed ✅
- *Error messages on loop during an open deadline.* - This has now been fixed ✅
## New features
- feat:splitstore:single compaction that can handle prune aka two marksets one compaction (#9571) ([filecoin-project/lotus#9571](https://github.com/filecoin-project/lotus/pull/9571))
- Introduces a new SplitStore mode, `messages`, which will only store on-chain messages. Fixes previous issues with `AutoPrune` not compacting the coldstore. [Link to documentation](https://lotus.filecoin.io/lotus/configure/splitstore/)
- feat: Raft consensus for lotus nodes in a cluster ([filecoin-project/lotus#9294](https://github.com/filecoin-project/lotus/pull/9294))
- Adds the experimental node cluster feature.
- feat: storage: Force exit GenerateSingleVanillaProof on cancelled context ([filecoin-project/lotus#9613](https://github.com/filecoin-project/lotus/pull/9613))
- `GenerateSingleVanillaProof` now respects its context, which means that it will skip slow-to-read sectors :snail: and return a context error instead of being blocked forever when storage reads are blocked (e.g. a disconnected NFS mount).
- feat: wdpost: Configurable pre-check timeouts ([filecoin-project/lotus#9680](https://github.com/filecoin-project/lotus/pull/9680))
- Adds configuration knobs for setting a custom amount of time that a proving pre-check can take before a sector and partition will be skipped. [Link to documentation](https://lotus.filecoin.io/storage-providers/advanced-configurations/proving/#pre-check-sector-timeout)
- feat: chain: future proof the from & to address protocols ([filecoin-project/lotus#9515](https://github.com/filecoin-project/lotus/pull/9515))
- This lets us add new address protocols to go-address without implicitly accepting them in messages on the network.
- feat: Retrieval into remote blockstores ([filecoin-project/lotus#9565](https://github.com/filecoin-project/lotus/pull/9565))
- Makes it possible to point retrievals at a network-backed blockstore.
- feat: Add node uptime rpc / output in info command ([filecoin-project/lotus#9436](https://github.com/filecoin-project/lotus/pull/9436))
- Adds node uptime stats to the `lotus-miner info` and `lotus info` commands
- feat: wdpost: Add ability to only have single partition per msg for partitions with… ([filecoin-project/lotus#9413](https://github.com/filecoin-project/lotus/pull/9413))
- Adds a configuration option to have a single partition per PoSt Message for partitions containing recovering sectors.
- feat: miner paramfetch: Don't fetch param files when not needed ([filecoin-project/lotus#9391](https://github.com/filecoin-project/lotus/pull/9391))
- A lotus-miner process that has disabled local PoSt / C2 / PR2 does not need the param files. This makes node startup much faster, greatly reducing downtime when restarts are needed.
- feat: client: Add retrieval deal ID and bytes transferred to retrieval output ([filecoin-project/lotus#9398](https://github.com/filecoin-project/lotus/pull/9398))
- Appends retrieval deal ID and bytes transferred to the retrieval output.
- feat: dealpublisher: check for duplicate deals before adding ([filecoin-project/lotus#9365](https://github.com/filecoin-project/lotus/pull/9365))
- feat: Drop active retrieval check (#764) ([filecoin-project/go-fil-markets#764](https://github.com/filecoin-project/go-fil-markets/pull/764))
- feat(retrievalmarkets): expose GetDynamicAsk (#748) ([filecoin-project/go-fil-markets#748](https://github.com/filecoin-project/go-fil-markets/pull/748))
- feat: handle retrieval queries for unindexed identity payload CIDs (#747) ([filecoin-project/go-fil-markets#747](https://github.com/filecoin-project/go-fil-markets/pull/747))
- feat: add a method for validating an address for a network version (#115) ([filecoin-project/go-state-types#115](https://github.com/filecoin-project/go-state-types/pull/115))
## Improvements
- fix: miner-cli: Fix lotus-miner proving check ([filecoin-project/lotus#9643](https://github.com/filecoin-project/lotus/pull/9643))
- Fixes the issue where the `lotus-miner proving check` command always outputted `Error: rg is nil`
- fix: sealing pipeline: Clear CreationTime when starting sector upgrade ([filecoin-project/lotus#9642](https://github.com/filecoin-project/lotus/pull/9642))
- Fixes the issue where an aborted SnapDeal upgrade could no longer be retried with SnapDeals.
- fix:sealing-fsm:wait mutable fsm state for immutable sector upgrade error ([filecoin-project/lotus#9598](https://github.com/filecoin-project/lotus/pull/9598))
- Adds a new WaitMutable state: if the deadline is open and the sectors are trying to finalize, they will wait on the worker until the deadline has closed. Note that they will not finalize as soon as the deadline closes; they will wait 1h before continuing. Fixes the previous issue where upgraded snap-sectors for an open deadline caused a lot of `error-messages` and `p_aux` issues.
- fix: cli: add beneficiary info to lotus-miner actor control list ([filecoin-project/lotus#9632](https://github.com/filecoin-project/lotus/pull/9632))
- Adds the beneficiary address to the `lotus-miner actor control list` output.
- fix: sealing pipeline: Release assigned deals on snapdeals abort ([filecoin-project/lotus#9601](https://github.com/filecoin-project/lotus/pull/9601))
- fix: docker: make compatible with arm platform ([filecoin-project/lotus#9363](https://github.com/filecoin-project/lotus/pull/9363))
- Makes the `Dockerfile.lotus` compatible with ARM platforms (e.g. Mac M1).
- fix: post worker sched: Don't check worker session in a busy loop ([filecoin-project/lotus#9495](https://github.com/filecoin-project/lotus/pull/9495))
- Fixes a looping pattern which could result in a flood of requests between `lotus-miner` and `lotus-worker`, potentially exhausting the resources needed to make HTTP requests and leading to all sorts of random RPC-related issues.
- fix: miner: init miners with 32GiB sectors by default ([filecoin-project/lotus#9364](https://github.com/filecoin-project/lotus/pull/9364))
- Makes `lotus-miner init` default to 32GiB sectors.
- fix: store identity CIDs in CARs for online deals (#749) ([filecoin-project/go-fil-markets#749](https://github.com/filecoin-project/go-fil-markets/pull/749))
- fix: cliutil: Fix URL-based API endpoint parsing
## Dependencies
- deps: upgrade go-merkledag to 0.8.1 (#9717)
- deps: Update go-fil-markets to v1.25.0 ([filecoin-project/lotus#9633](https://github.com/filecoin-project/lotus/pull/9633))
- deps: upgrade go-merkledag to 0.8.0 ([filecoin-project/lotus#9455](https://github.com/filecoin-project/lotus/pull/9455))
## Others
- fix/build: Update Zondax/hid to 0.9.1
- refactor: sealing: minor refactor of FinalizeReplicaUpdate ([filecoin-project/lotus#9614](https://github.com/filecoin-project/lotus/pull/9614))
- update ffi to 280c4f8b94fd46dc (#9608) ([filecoin-project/lotus#9608](https://github.com/filecoin-project/lotus/pull/9608))
- fix: tvx: make it work with the FVM ([filecoin-project/lotus#9604](https://github.com/filecoin-project/lotus/pull/9604))
- fix: autobatch: remove potential deadlock when a block is missing ([filecoin-project/lotus#9602](https://github.com/filecoin-project/lotus/pull/9602))
- feat: shed: set control address: add dump bytes option ([filecoin-project/lotus#9572](https://github.com/filecoin-project/lotus/pull/9572))
- feat: shed: Online export-car ([filecoin-project/lotus#9590](https://github.com/filecoin-project/lotus/pull/9590))
- fix: chain: Update chain.go ([filecoin-project/lotus#9373](https://github.com/filecoin-project/lotus/pull/9373))
- fix: fvm: Allow setting local bundles for Debug FVM for av 9+ ([filecoin-project/lotus#9509](https://github.com/filecoin-project/lotus/pull/9509))
- fix: ux: Add outputs to wallet commands ([filecoin-project/lotus#9416](https://github.com/filecoin-project/lotus/pull/9416))
- fix: ux: specify arg in `net ping` cmd ([filecoin-project/lotus#9459](https://github.com/filecoin-project/lotus/pull/9459))
- fix: cli: renew --only-cc with sectorfile ([filecoin-project/lotus#9402](https://github.com/filecoin-project/lotus/pull/9402))
- fix: bstore: Handle codecs correctly in membstore Get ([filecoin-project/lotus#9471](https://github.com/filecoin-project/lotus/pull/9471))
- fix: not multiplied by the number of seconds ([filecoin-project/lotus#9460](https://github.com/filecoin-project/lotus/pull/9460))
- Makefile: Drop rarely used binaries from build-devnets (#9513) ([filecoin-project/lotus#9513](https://github.com/filecoin-project/lotus/pull/9513))
- _ci_ Remove unneeded homebrew deps ([filecoin-project/lotus#9559](https://github.com/filecoin-project/lotus/pull/9559))
- _ci_: Have apt-get wait until it can get a lock in packer builds ([filecoin-project/lotus#9534](https://github.com/filecoin-project/lotus/pull/9534))
- _ci_ Appimage go1.18.1 fix ([filecoin-project/lotus#9496](https://github.com/filecoin-project/lotus/pull/9496))
- _ci_: Fix failing Digital Ocean and Amazon Machine Image release process ([filecoin-project/lotus#9425](https://github.com/filecoin-project/lotus/pull/9425))
- _ci_: Don't publish new homebrew releases for RC builds ([filecoin-project/lotus#9350](https://github.com/filecoin-project/lotus/pull/9350))
- _ci_: Use golang 1.18.1 to build appimage ([filecoin-project/lotus#9386](https://github.com/filecoin-project/lotus/pull/9386))
- _ci_ Refactor release pipeline to better support m1 builds ([filecoin-project/lotus#9600](https://github.com/filecoin-project/lotus/pull/9600))
- _ci_: Rely on local env variable instead of context ([filecoin-project/lotus#9740](https://github.com/filecoin-project/lotus/pull/9740))
- feat: shed: FIP0036 post poll result processing ([filecoin-project/lotus#9387](https://github.com/filecoin-project/lotus/pull/9387))
- misc: github: Cleanup PR template ([filecoin-project/lotus#9472](https://github.com/filecoin-project/lotus/pull/9472))
- docs: release template: Mention codegen in release prep ([filecoin-project/lotus#9430](https://github.com/filecoin-project/lotus/pull/9430))
- chore: merge releases (v1.17.2) into master ([filecoin-project/lotus#9433](https://github.com/filecoin-project/lotus/pull/9433))
- chore: merge release/v1.18.0 into master ([filecoin-project/lotus#9597](https://github.com/filecoin-project/lotus/pull/9597))
- chore:shed: Teach shed/sim to understand --tipset=@nnn notation ([filecoin-project/lotus#9434](https://github.com/filecoin-project/lotus/pull/9434))
- _chore_: Upgrade `hid` ([filecoin-project/lotus#9406](https://github.com/filecoin-project/lotus/pull/9406))
- chore: release: Update `release_issue_template` ([filecoin-project/lotus#9150](https://github.com/filecoin-project/lotus/pull/9150))
- chore: update lotus version to 1.19.0-rc1
- chore: merge release into master ([filecoin-project/lotus#9657](https://github.com/filecoin-project/lotus/pull/9657))
- Backport: #9061 rpc errors ([filecoin-project/lotus#9384](https://github.com/filecoin-project/lotus/pull/9384))
- shed: util: get all msig ([filecoin-project/lotus#9322](https://github.com/filecoin-project/lotus/pull/9322))
- fix: test: simplify TestDeadlineToggling ([filecoin-project/lotus#9356](https://github.com/filecoin-project/lotus/pull/9356))
- fix: build: set PropagationDelaySecs correctly ([filecoin-project/lotus#9358](https://github.com/filecoin-project/lotus/pull/9358))
- fix: test: flaky TestDeadlineToggling around nulls (#9354) ([filecoin-project/lotus#9354](https://github.com/filecoin-project/lotus/pull/9354))
- fix: retrievals: price-per-byte calculation fix ([filecoin-project/lotus#9353](https://github.com/filecoin-project/lotus/pull/9353))
- fix: docs: update Go-badge in readme ([filecoin-project/lotus#9347](https://github.com/filecoin-project/lotus/pull/9347))
- docs: release template: clarify location of version.go ([filecoin-project/lotus#9338](https://github.com/filecoin-project/lotus/pull/9338))
- build: Bump version to v1.17.3-dev ([filecoin-project/lotus#9337](https://github.com/filecoin-project/lotus/pull/9337))
- github.com/filecoin-project/go-fil-markets (v1.24.0-v17 -> v1.25.0):
- Merge branch 'release/v1.24.3'
- Update ffi and update markets to v9 (#751) (#761) ([filecoin-project/go-fil-markets#761](https://github.com/filecoin-project/go-fil-markets/pull/761))
- chore: extract Provider piece logic to separate file (#750) ([filecoin-project/go-fil-markets#750](https://github.com/filecoin-project/go-fil-markets/pull/750))
- Merge branch 'release/v1.24.2'
- Feat/support custom metadata (#759) ([filecoin-project/go-fil-markets#759](https://github.com/filecoin-project/go-fil-markets/pull/759))
- Revert "Update ffi and update markets to v9 (#751)" (#755) ([filecoin-project/go-fil-markets#755](https://github.com/filecoin-project/go-fil-markets/pull/755))
- Update ffi and update markets to v9 (#751) ([filecoin-project/go-fil-markets#751](https://github.com/filecoin-project/go-fil-markets/pull/751))
- release: v1.24.0 ([filecoin-project/go-fil-markets#745](https://github.com/filecoin-project/go-fil-markets/pull/745))
- github.com/filecoin-project/go-state-types (v0.9.8 -> v0.9.9):
## lotus-market EOL notice
As mentioned in the [lotus v1.17.0 release notes](https://github.com/filecoin-project/lotus/releases/tag/v1.17.0), markets-related features, enhancements and fixes are now a lower priority for Lotus. We recommend our users migrate to other deal-making focused software, like [boost](https://boost.filecoin.io/), as soon as possible. That being said, the lotus maintainers' plan is:
- Lotus maintainers will stop supporting lotus-market subcomponent/**storage** deal making related issues or enhancements on Jan 31, 2023.
- In Q2 2023, we will be deprecating/removing lotus-market related code from this repository.
If you have any questions or concerns, please raise them in [Lotus discussion](https://github.com/filecoin-project/lotus/discussions/categories/market)!
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Geoff Stuart | 69 | +4745/-19478 | 405 |
| Shrenuj Bansal | 39 | +5257/-2183 | 243 |
| Łukasz Magiera | 32 | +2763/-730 | 169 |
| Aayush | 47 | +1439/-1138 | 157 |
| Ian Davis | 21 | +556/-1065 | 41 |
| Rod Vagg | 5 | +657/-320 | 18 |
| jennijuju | 4 | +632/-317 | 6 |
| Aayush Rajasekaran | 13 | +700/-135 | 18 |
| Jennifer Wang | 14 | +740/-54 | 25 |
| ZenGround0 | 1 | +193/-195 | 14 |
| Hannah Howard | 4 | +138/-122 | 52 |
| Steven Allen | 4 | +105/-24 | 11 |
| zenground0 | 9 | +109/-16 | 14 |
| Peter Rabbitson | 1 | +27/-23 | 3 |
| hannahhoward | 2 | +49/-0 | 2 |
| Airenas Vaičiūnas | 2 | +31/-16 | 2 |
| simlecode | 6 | +19/-10 | 12 |
| Phi | 5 | +16/-10 | 7 |
| sectrgt | 2 | +18/-0 | 2 |
| Jiaying Wang | 2 | +4/-4 | 3 |
| Rob Quist | 1 | +3/-1 | 1 |
| Jakub Sztandera | 1 | +1/-1 | 1 |

# 1.18.2 / 2022-12-10
This is an OPTIONAL patch release that fixes a recently reported bug, where the miner process crashes due to a panic during an AddPiece process. More details can be found [here](https://github.com/filecoin-project/lotus/pull/9822).
# 1.18.1 / 2022-11-28
This is a small OPTIONAL patch release for the mandatory v1.18.0 release that supports the Filecoin nv17 Shark Upgrade.
@ -33,6 +33,8 @@ RUN set -eux; \
|
||||
COPY ./ /opt/filecoin
|
||||
WORKDIR /opt/filecoin
|
||||
|
||||
RUN scripts/docker-git-state-check.sh
|
||||
|
||||
### make configurable filecoin-ffi build
|
||||
ARG FFI_BUILD_FROM_SOURCE=0
|
||||
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
|
||||
|
@ -1 +1 @@
|
||||
1.18.8
|
||||
1.19.7
|
||||
|
4
Makefile
@ -298,7 +298,7 @@ actors-gen: actors-code-gen fiximports
|
||||
.PHONY: actors-gen
|
||||
|
||||
bundle-gen:
|
||||
$(GOCC) run ./gen/bundle
|
||||
$(GOCC) run ./gen/bundle $(VERSION) $(RELEASE) $(RELEASE_OVERRIDES)
|
||||
$(GOCC) fmt ./build/...
|
||||
.PHONY: bundle-gen
|
||||
|
||||
@ -354,7 +354,7 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
|
||||
fiximports:
|
||||
./scripts/fiximports
|
||||
|
||||
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci bundle-gen fiximports
|
||||
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
|
||||
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
|
||||
.PHONY: gen
|
||||
|
||||
|
159
api/api_full.go
@ -7,15 +7,16 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
@ -31,6 +32,7 @@ import (
|
||||
lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
)
|
||||
@ -151,7 +153,7 @@ type FullNode interface {
|
||||
|
||||
// ChainGetPath returns a set of revert/apply operations needed to get from
|
||||
// one tipset to another, for example:
|
||||
//```
|
||||
// ```
|
||||
// to
|
||||
// ^
|
||||
// from tAA
|
||||
@ -160,7 +162,7 @@ type FullNode interface {
|
||||
// ^---*--^
|
||||
// ^
|
||||
// tRR
|
||||
//```
|
||||
// ```
|
||||
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
|
||||
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read
|
||||
|
||||
@ -171,10 +173,24 @@ type FullNode interface {
|
||||
// If oldmsgskip is set, messages from before the requested roots are also not included.
|
||||
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
|
||||
|
||||
// ChainPrune prunes the stored chain state and garbage collects; only supported if you
|
||||
// ChainExportRangeInternal triggers the export of a chain
|
||||
// CAR-snapshot directly to disk. It is similar to ChainExport,
|
||||
// except, depending on options, the snapshot can include receipts,
|
||||
// messages and stateroots for the length between the specified head
|
||||
// and tail, thus producing "archival-grade" snapshots that include
|
||||
// all the on-chain data. The header chain is included back to
|
||||
// genesis and these snapshots can be used to initialize Filecoin
|
||||
// nodes.
|
||||
ChainExportRangeInternal(ctx context.Context, head, tail types.TipSetKey, cfg ChainExportConfig) error //perm:admin
|
||||
|
||||
// ChainPrune forces compaction on cold store and garbage collects; only supported if you
|
||||
// are using the splitstore
|
||||
ChainPrune(ctx context.Context, opts PruneOpts) error //perm:admin
|
||||
|
||||
// ChainHotGC does online (badger) GC on the hot store; only supported if you are using
|
||||
// the splitstore
|
||||
ChainHotGC(ctx context.Context, opts HotGCOpts) error //perm:admin
|
||||
|
||||
// ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore
|
||||
// if supported by the underlying implementation.
|
||||
ChainCheckBlockstore(context.Context) error //perm:admin
|
||||
@ -182,6 +198,9 @@ type FullNode interface {
|
||||
// ChainBlockstoreInfo returns some basic information about the blockstore
|
||||
ChainBlockstoreInfo(context.Context) (map[string]interface{}, error) //perm:read
|
||||
|
||||
// ChainGetEvents returns the events under an event AMT root CID.
|
||||
ChainGetEvents(context.Context, cid.Cid) ([]types.Event, error) //perm:read
|
||||
|
||||
// GasEstimateFeeCap estimates gas fee cap
|
||||
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||
|
||||
@ -278,8 +297,10 @@ type FullNode interface {
|
||||
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
|
||||
MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read
|
||||
|
||||
// MpoolClear clears pending messages from the mpool
|
||||
MpoolClear(context.Context, bool) error //perm:write
|
||||
// MpoolClear clears pending messages from the mpool.
|
||||
// If clearLocal is true, ALL messages will be cleared.
|
||||
// If clearLocal is false, local messages will be protected, all others will be cleared.
|
||||
MpoolClear(ctx context.Context, clearLocal bool) error //perm:write
|
||||
|
||||
// MpoolGetConfig returns (a copy of) the current mpool config
|
||||
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
|
||||
@ -388,12 +409,12 @@ type FullNode interface {
|
||||
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
|
||||
|
||||
// ClientUnimport removes references to the specified file from filestore
|
||||
//ClientUnimport(path string)
|
||||
// ClientUnimport(path string)
|
||||
|
||||
// ClientListImports lists imported files and their root CIDs
|
||||
ClientListImports(ctx context.Context) ([]Import, error) //perm:write
|
||||
|
||||
//ClientListAsks() []Ask
|
||||
// ClientListAsks() []Ask
|
||||
|
||||
// MethodGroup: State
|
||||
// The State methods are used to query, inspect, and interact with chain state.
|
||||
@ -640,14 +661,14 @@ type FullNode interface {
|
||||
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
|
||||
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||
|
||||
//MsigGetPending returns pending transactions for the given multisig
|
||||
//wallet. Once pending transactions are fully approved, they will no longer
|
||||
//appear here.
|
||||
// MsigGetPending returns pending transactions for the given multisig
|
||||
// wallet. Once pending transactions are fully approved, they will no longer
|
||||
// appear here.
|
||||
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read
|
||||
|
||||
// MsigCreate creates a multisig wallet
|
||||
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
|
||||
//<initial balance>, <sender address of the create msg>, <gas price>
|
||||
// <initial balance>, <sender address of the create msg>, <gas price>
|
||||
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign
|
||||
|
||||
// MsigPropose proposes a multisig message
|
||||
@ -759,6 +780,88 @@ type FullNode interface {
|
||||
|
||||
NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read
|
||||
|
||||
// MethodGroup: Eth
|
||||
// These methods are used for Ethereum-compatible JSON-RPC calls
|
||||
//
|
||||
// EthAccounts will always return [] since we don't expect Lotus to manage private keys
|
||||
EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) //perm:read
|
||||
// EthAddressToFilecoinAddress converts an EthAddress into an f410 Filecoin Address
|
||||
EthAddressToFilecoinAddress(ctx context.Context, ethAddress ethtypes.EthAddress) (address.Address, error) //perm:read
|
||||
// FilecoinAddressToEthAddress converts an f410 or f0 Filecoin Address to an EthAddress
|
||||
FilecoinAddressToEthAddress(ctx context.Context, filecoinAddress address.Address) (ethtypes.EthAddress, error) //perm:read
|
||||
// EthBlockNumber returns the height of the latest (heaviest) TipSet
|
||||
EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
||||
// EthGetBlockTransactionCountByNumber returns the number of messages in the TipSet
|
||||
EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error) //perm:read
|
||||
// EthGetBlockTransactionCountByHash returns the number of messages in the TipSet
|
||||
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) //perm:read
|
||||
|
||||
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
||||
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
|
||||
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
|
||||
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
|
||||
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
|
||||
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
|
||||
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read
|
||||
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
|
||||
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
|
||||
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
||||
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
|
||||
|
||||
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) //perm:read
|
||||
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) //perm:read
|
||||
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) //perm:read
|
||||
EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
||||
NetVersion(ctx context.Context) (string, error) //perm:read
|
||||
NetListening(ctx context.Context) (bool, error) //perm:read
|
||||
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
|
||||
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
||||
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read
|
||||
|
||||
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
|
||||
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read
|
||||
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error) //perm:read
|
||||
|
||||
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
|
||||
|
||||
// Returns event logs matching given filter spec.
|
||||
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) //perm:read
|
||||
|
||||
// Polling method for a filter, returns event logs which occurred since last poll.
|
||||
// (requires write perm since timestamp of last filter execution will be written)
|
||||
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
|
||||
|
||||
// Returns event logs matching filter with given id.
|
||||
// (requires write perm since timestamp of last filter execution will be written)
|
||||
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write
|
||||
|
||||
// Installs a persistent filter based on given filter spec.
|
||||
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:write
|
||||
|
||||
// Installs a persistent filter to notify when a new block arrives.
|
||||
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
|
||||
|
||||
// Installs a persistent filter to notify when new messages arrive in the message pool.
|
||||
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write
|
||||
|
||||
// Uninstalls a filter with given id.
|
||||
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:write
|
||||
|
||||
// Subscribe to different event types using websockets
|
||||
// eventTypes is one or more of:
|
||||
// - newHeads: notify when new blocks arrive.
|
||||
// - pendingTransactions: notify when new messages arrive in the message pool.
|
||||
// - logs: notify new event logs that match a criteria
|
||||
// params contains additional parameters used with the log event type
|
||||
// The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
|
||||
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:write
|
||||
|
||||
// Unsubscribe from a websocket subscription
|
||||
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:write
|
||||
|
||||
// Returns the client version
|
||||
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
|
||||
|
||||
// CreateBackup creates node backup onder the specified file name. The
|
||||
// method requires that the lotus daemon is running with the
|
||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||
@ -769,6 +872,12 @@ type FullNode interface {
|
||||
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
|
||||
}
|
||||
|
||||
// reverse interface to the client, called after EthSubscribe
|
||||
type EthSubscriber interface {
|
||||
// note: the parameter is ethtypes.EthSubscriptionResponse serialized as json object
|
||||
EthSubscription(ctx context.Context, r jsonrpc.RawParams) error // rpc_method:eth_subscription notify:true
|
||||
}
|
||||
|
||||
type StorageAsk struct {
|
||||
Response *storagemarket.StorageAsk
|
||||
|
||||
@ -1252,3 +1361,27 @@ type PruneOpts struct {
|
||||
MovingGC bool
|
||||
RetainState int64
|
||||
}
|
||||
|
||||
type HotGCOpts struct {
|
||||
Threshold float64
|
||||
Periodic bool
|
||||
Moving bool
|
||||
}
|
||||
|
||||
type EthTxReceipt struct {
|
||||
TransactionHash ethtypes.EthHash `json:"transactionHash"`
|
||||
TransactionIndex ethtypes.EthUint64 `json:"transactionIndex"`
|
||||
BlockHash ethtypes.EthHash `json:"blockHash"`
|
||||
BlockNumber ethtypes.EthUint64 `json:"blockNumber"`
|
||||
From ethtypes.EthAddress `json:"from"`
|
||||
To *ethtypes.EthAddress `json:"to"`
|
||||
StateRoot ethtypes.EthHash `json:"root"`
|
||||
Status ethtypes.EthUint64 `json:"status"`
|
||||
ContractAddress *ethtypes.EthAddress `json:"contractAddress"`
|
||||
CumulativeGasUsed ethtypes.EthUint64 `json:"cumulativeGasUsed"`
|
||||
GasUsed ethtypes.EthUint64 `json:"gasUsed"`
|
||||
EffectiveGasPrice ethtypes.EthBigInt `json:"effectiveGasPrice"`
|
||||
LogsBloom ethtypes.EthBytes `json:"logsBloom"`
|
||||
Logs []ethtypes.EthLog `json:"logs"`
|
||||
Type ethtypes.EthUint64 `json:"type"`
|
||||
}
|
||||
|
@ -3,16 +3,19 @@ package api
|
||||
import (
|
||||
"context"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
// MODIFYING THE API INTERFACE
|
||||
@ -23,13 +26,16 @@ import (
|
||||
// When adding / changing methods in this file:
|
||||
// * Do the change here
|
||||
// * Adjust implementation in `node/impl/`
|
||||
// * Run `make gen` - this will:
|
||||
// * Run `make clean && make deps && make gen` - this will:
|
||||
// * Generate proxy structs
|
||||
// * Generate mocks
|
||||
// * Generate markdown docs
|
||||
// * Generate openrpc blobs
|
||||
|
||||
type Gateway interface {
|
||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
|
||||
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
|
||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
|
||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
||||
ChainPutObj(context.Context, blocks.Block) error
|
||||
ChainHead(ctx context.Context) (*types.TipSet, error)
|
||||
@ -45,15 +51,18 @@ type Gateway interface {
|
||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
||||
ChainGetGenesis(context.Context) (*types.TipSet, error)
|
||||
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
|
||||
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
|
||||
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
|
||||
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
|
||||
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
|
||||
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
|
||||
MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
|
||||
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
||||
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*InvocResult, error)
|
||||
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
|
||||
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
|
||||
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
|
||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
|
||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
|
||||
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
|
||||
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
||||
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
|
||||
@ -61,12 +70,51 @@ type Gateway interface {
|
||||
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
|
||||
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
||||
StateNetworkName(context.Context) (dtypes.NetworkName, error)
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
|
||||
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
|
||||
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
|
||||
WalletBalance(context.Context, address.Address) (types.BigInt, error)
|
||||
Version(context.Context) (APIVersion, error)
|
||||
Discover(context.Context) (apitypes.OpenRPCDocument, error)
|
||||
|
||||
EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error)
|
||||
EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error)
|
||||
EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error)
|
||||
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error)
|
||||
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error)
|
||||
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error)
|
||||
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error)
|
||||
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
|
||||
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
|
||||
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
|
||||
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
|
||||
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
|
||||
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
|
||||
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
|
||||
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
|
||||
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
|
||||
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
|
||||
NetVersion(ctx context.Context) (string, error)
|
||||
NetListening(ctx context.Context) (bool, error)
|
||||
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
|
||||
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error)
|
||||
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
|
||||
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
|
||||
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
|
||||
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error)
|
||||
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
|
||||
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
|
||||
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
||||
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
|
||||
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error)
|
||||
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error)
|
||||
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error)
|
||||
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error)
|
||||
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
|
||||
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
|
||||
Web3ClientVersion(ctx context.Context) (string, error)
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
@ -22,7 +22,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
|
||||
@ -152,7 +152,7 @@ type StorageMiner interface {
|
||||
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
|
||||
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
|
||||
|
||||
//storiface.WorkerReturn
|
||||
// storiface.WorkerReturn
|
||||
ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storiface.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
|
||||
@ -175,7 +175,7 @@ type StorageMiner interface {
|
||||
// SealingSchedDiag dumps internal sealing scheduler state
|
||||
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
|
||||
SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
|
||||
//SealingSchedRemove removes a request from sealing pipeline
|
||||
// SealingSchedRemove removes a request from sealing pipeline
|
||||
SealingRemoveRequest(ctx context.Context, schedId uuid.UUID) error //perm:admin
|
||||
|
||||
// paths.SectorIndex
|
||||
@ -212,9 +212,11 @@ type StorageMiner interface {
|
||||
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
|
||||
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
|
||||
|
||||
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
|
||||
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
|
||||
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
|
||||
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
|
||||
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
|
||||
|
||||
// MarketListRetrievalDeals is deprecated, returns empty list
|
||||
MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read
|
||||
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
|
||||
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
|
||||
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
|
||||
@ -322,7 +324,7 @@ type StorageMiner interface {
|
||||
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
|
||||
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
|
||||
ComputeProof(ctx context.Context, ssi []builtinactors.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtinactors.PoStProof, error) //perm:read
|
||||
|
||||
// RecoverFault can be used to declare recoveries manually. It sends messages
|
||||
// to the miner actor with details of recovered sectors and returns the CID of messages. It honors the
|
||||
|
456
api/cbor_gen.go
@ -50,22 +50,6 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
if len("WaitSentinel") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
// t.Vouchers ([]*paych.SignedVoucher) (slice)
|
||||
if len("Vouchers") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Vouchers\" was too long")
|
||||
@ -90,6 +74,23 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
if len("WaitSentinel") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -140,19 +141,6 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
return xerrors.Errorf("unmarshaling t.Channel: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
case "WaitSentinel":
|
||||
|
||||
{
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
t.WaitSentinel = c
|
||||
|
||||
}
|
||||
// t.Vouchers ([]*paych.SignedVoucher) (slice)
|
||||
case "Vouchers":
|
||||
@ -184,6 +172,20 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
t.Vouchers[i] = &v
|
||||
}
|
||||
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
case "WaitSentinel":
|
||||
|
||||
{
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
t.WaitSentinel = c
|
||||
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||
@ -204,19 +206,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
if len("SectorID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"SectorID\" was too long")
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
if len("Size") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Size\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("SectorID")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Size")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -236,19 +238,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
if len("Size") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Size\" was too long")
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
if len("SectorID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"SectorID\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Size")); err != nil {
|
||||
if _, err := io.WriteString(w, string("SectorID")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -293,8 +295,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
case "SectorID":
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
case "Size":
|
||||
|
||||
{
|
||||
|
||||
@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.SectorID = abi.SectorNumber(extra)
|
||||
t.Size = abi.UnpaddedPieceSize(extra)
|
||||
|
||||
}
|
||||
// t.Offset (abi.PaddedPieceSize) (uint64)
|
||||
@ -323,8 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
t.Offset = abi.PaddedPieceSize(extra)
|
||||
|
||||
}
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
case "Size":
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
case "SectorID":
|
||||
|
||||
{
|
||||
|
||||
@ -335,7 +337,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.Size = abi.UnpaddedPieceSize(extra)
|
||||
t.SectorID = abi.SectorNumber(extra)
|
||||
|
||||
}
|
||||
|
||||
@ -474,6 +476,28 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.Value (abi.SealRandomness) (slice)
|
||||
if len("Value") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Value\" was too long")
|
||||
@ -497,28 +521,6 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
|
||||
if _, err := cw.Write(t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -560,7 +562,33 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Value (abi.SealRandomness) (slice)
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative overflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.Value (abi.SealRandomness) (slice)
|
||||
case "Value":
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
@ -582,32 +610,6 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
@ -629,6 +631,28 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.Value (abi.InteractiveSealRandomness) (slice)
|
||||
if len("Value") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Value\" was too long")
|
||||
@ -652,28 +676,6 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
|
||||
if _, err := cw.Write(t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -715,7 +717,33 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Value (abi.InteractiveSealRandomness) (slice)
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative overflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.Value (abi.InteractiveSealRandomness) (slice)
|
||||
case "Value":
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
@ -737,32 +765,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
@ -784,6 +786,22 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
if len("DealID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealID\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealID")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.PublishCid (cid.Cid) (struct)
|
||||
if len("PublishCid") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
|
||||
@ -806,22 +824,6 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
|
||||
}
|
||||
}
|
||||
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
if len("DealID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealID\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealID")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealProposal (market.DealProposal) (struct)
|
||||
if len("DealProposal") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
|
||||
@ -910,7 +912,22 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.PublishCid (cid.Cid) (struct)
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
case "DealID":
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.DealID = abi.DealID(extra)
|
||||
|
||||
}
|
||||
// t.PublishCid (cid.Cid) (struct)
|
||||
case "PublishCid":
|
||||
|
||||
{
|
||||
@ -932,21 +949,6 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
t.PublishCid = &c
|
||||
}
|
||||
|
||||
}
|
||||
// t.DealID (abi.DealID) (uint64)
|
||||
case "DealID":
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.DealID = abi.DealID(extra)
|
||||
|
||||
}
|
||||
// t.DealProposal (market.DealProposal) (struct)
|
||||
case "DealProposal":
|
||||
@ -1140,28 +1142,6 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
if len("StartEpoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.StartEpoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.EndEpoch (abi.ChainEpoch) (int64)
|
||||
if len("EndEpoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
|
||||
@ -1183,6 +1163,28 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
if len("StartEpoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.StartEpoch >= 0 {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1224,33 +1226,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
case "StartEpoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.StartEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.EndEpoch (abi.ChainEpoch) (int64)
|
||||
// t.EndEpoch (abi.ChainEpoch) (int64)
|
||||
case "EndEpoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
@ -1267,7 +1243,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
return fmt.Errorf("int64 negative overflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
@ -1276,6 +1252,32 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
|
||||
t.EndEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||
case "StartEpoch":
|
||||
{
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative overflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.StartEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
|
@ -35,10 +35,10 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade
|
||||
}
|
||||
|
||||
// NewFullNodeRPCV1 creates a new http jsonrpc client.
|
||||
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
var res v1api.FullNodeStruct
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
|
||||
api.GetInternalStructs(&res), requestHeader, append([]jsonrpc.Option{jsonrpc.WithErrors(api.RPCErrors)}, opts...)...)
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
|
@ -14,9 +14,9 @@ import (
|
||||
"unicode"
|
||||
|
||||
"github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-graphsync"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
textselector "github.com/ipld/go-ipld-selector-text-lite"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/metrics"
|
||||
@ -24,12 +24,10 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
"github.com/filecoin-project/go-fil-markets/filestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
@ -43,6 +41,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
@ -70,6 +69,7 @@ func init() {
|
||||
}
|
||||
|
||||
ExampleValues[reflect.TypeOf(c)] = c
|
||||
ExampleValues[reflect.TypeOf(&c)] = &c
|
||||
|
||||
c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve")
|
||||
if err != nil {
|
||||
@ -152,8 +152,8 @@ func init() {
|
||||
addExample(map[string]int{"name": 42})
|
||||
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
|
||||
addExample(&types.ExecutionTrace{
|
||||
Msg: ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
|
||||
MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
|
||||
Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace),
|
||||
MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace),
|
||||
})
|
||||
addExample(map[string]types.Actor{
|
||||
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
|
||||
@ -300,7 +300,8 @@ func init() {
|
||||
"title": "Lotus RPC API",
|
||||
"version": "1.2.1/generated=2020-11-22T08:22:42-06:00",
|
||||
},
|
||||
"methods": []interface{}{}},
|
||||
"methods": []interface{}{},
|
||||
},
|
||||
)
|
||||
|
||||
addExample(api.CheckStatusCode(0))
|
||||
@ -337,7 +338,8 @@ func init() {
|
||||
NumConnsInbound: 3,
|
||||
NumConnsOutbound: 4,
|
||||
NumFD: 5,
|
||||
}})
|
||||
},
|
||||
})
|
||||
addExample(api.NetLimit{
|
||||
Memory: 123,
|
||||
StreamsInbound: 1,
|
||||
@ -348,6 +350,7 @@ func init() {
|
||||
Conns: 4,
|
||||
FD: 5,
|
||||
})
|
||||
|
||||
addExample(map[string]bitfield.BitField{
|
||||
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
||||
})
|
||||
@ -367,11 +370,44 @@ func init() {
|
||||
Headers: nil,
|
||||
},
|
||||
})
|
||||
|
||||
ethint := ethtypes.EthUint64(5)
|
||||
addExample(ethint)
|
||||
addExample(ðint)
|
||||
|
||||
ethaddr, _ := ethtypes.ParseEthAddress("0x5CbEeCF99d3fDB3f25E309Cc264f240bb0664031")
|
||||
addExample(ethaddr)
|
||||
addExample(ðaddr)
|
||||
|
||||
ethhash, _ := ethtypes.EthHashFromCid(c)
|
||||
addExample(ethhash)
|
||||
addExample(ðhash)
|
||||
|
||||
ethFeeHistoryReward := [][]ethtypes.EthBigInt{}
|
||||
addExample(ðFeeHistoryReward)
|
||||
|
||||
addExample(&uuid.UUID{})
|
||||
|
||||
filterid := ethtypes.EthFilterID(ethhash)
|
||||
addExample(filterid)
|
||||
addExample(&filterid)
|
||||
|
||||
subid := ethtypes.EthSubscriptionID(ethhash)
|
||||
addExample(subid)
|
||||
addExample(&subid)
|
||||
|
||||
pstring := func(s string) *string { return &s }
|
||||
addExample(ðtypes.EthFilterSpec{
|
||||
FromBlock: pstring("2301220"),
|
||||
Address: []ethtypes.EthAddress{ethaddr},
|
||||
})
|
||||
|
||||
percent := types.Percent(123)
|
||||
addExample(percent)
|
||||
addExample(&percent)
|
||||
}
|
||||
|
||||
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
|
||||
|
||||
switch pkg {
|
||||
case "api": // latest
|
||||
switch name {
|
||||
@ -441,7 +477,7 @@ func ExampleValue(method string, t, parent reflect.Type) interface{} {
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct {
|
||||
es := exampleStruct(method, t.Elem(), t)
|
||||
//ExampleValues[t] = es
|
||||
ExampleValues[t] = es
|
||||
return es
|
||||
}
|
||||
case reflect.Interface:
|
||||
@ -458,8 +494,8 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} {
|
||||
if f.Type == parent {
|
||||
continue
|
||||
}
|
||||
caser := cases.Title(language.English)
|
||||
if caser.String(f.Name) == f.Name {
|
||||
|
||||
if f.IsExported() {
|
||||
ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
|
||||
}
|
||||
}
|
|
46
api/eth_aliases.go
Normal file
@ -0,0 +1,46 @@
|
||||
package api
|
||||
|
||||
import apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
|
||||
func CreateEthRPCAliases(as apitypes.Aliaser) {
|
||||
// TODO: maybe use reflect to automatically register all the eth aliases
|
||||
as.AliasMethod("eth_accounts", "Filecoin.EthAccounts")
|
||||
as.AliasMethod("eth_blockNumber", "Filecoin.EthBlockNumber")
|
||||
as.AliasMethod("eth_getBlockTransactionCountByNumber", "Filecoin.EthGetBlockTransactionCountByNumber")
|
||||
as.AliasMethod("eth_getBlockTransactionCountByHash", "Filecoin.EthGetBlockTransactionCountByHash")
|
||||
|
||||
as.AliasMethod("eth_getBlockByHash", "Filecoin.EthGetBlockByHash")
|
||||
as.AliasMethod("eth_getBlockByNumber", "Filecoin.EthGetBlockByNumber")
|
||||
as.AliasMethod("eth_getTransactionByHash", "Filecoin.EthGetTransactionByHash")
|
||||
as.AliasMethod("eth_getTransactionCount", "Filecoin.EthGetTransactionCount")
|
||||
as.AliasMethod("eth_getTransactionReceipt", "Filecoin.EthGetTransactionReceipt")
|
||||
as.AliasMethod("eth_getTransactionByBlockHashAndIndex", "Filecoin.EthGetTransactionByBlockHashAndIndex")
|
||||
as.AliasMethod("eth_getTransactionByBlockNumberAndIndex", "Filecoin.EthGetTransactionByBlockNumberAndIndex")
|
||||
|
||||
as.AliasMethod("eth_getCode", "Filecoin.EthGetCode")
|
||||
as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt")
|
||||
as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance")
|
||||
as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
|
||||
as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory")
|
||||
as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion")
|
||||
as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas")
|
||||
as.AliasMethod("eth_gasPrice", "Filecoin.EthGasPrice")
|
||||
as.AliasMethod("eth_sendRawTransaction", "Filecoin.EthSendRawTransaction")
|
||||
as.AliasMethod("eth_estimateGas", "Filecoin.EthEstimateGas")
|
||||
as.AliasMethod("eth_call", "Filecoin.EthCall")
|
||||
|
||||
as.AliasMethod("eth_getLogs", "Filecoin.EthGetLogs")
|
||||
as.AliasMethod("eth_getFilterChanges", "Filecoin.EthGetFilterChanges")
|
||||
as.AliasMethod("eth_getFilterLogs", "Filecoin.EthGetFilterLogs")
|
||||
as.AliasMethod("eth_newFilter", "Filecoin.EthNewFilter")
|
||||
as.AliasMethod("eth_newBlockFilter", "Filecoin.EthNewBlockFilter")
|
||||
as.AliasMethod("eth_newPendingTransactionFilter", "Filecoin.EthNewPendingTransactionFilter")
|
||||
as.AliasMethod("eth_uninstallFilter", "Filecoin.EthUninstallFilter")
|
||||
as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
|
||||
as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")
|
||||
|
||||
as.AliasMethod("net_version", "Filecoin.NetVersion")
|
||||
as.AliasMethod("net_listening", "Filecoin.NetListening")
|
||||
|
||||
as.AliasMethod("web3_clientVersion", "Filecoin.Web3ClientVersion")
|
||||
}
|
@ -12,8 +12,8 @@ import (
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
uuid "github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
metrics "github.com/libp2p/go-libp2p/core/metrics"
|
||||
network0 "github.com/libp2p/go-libp2p/core/network"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
@ -21,8 +21,9 @@ import (
|
||||
|
||||
address "github.com/filecoin-project/go-address"
|
||||
bitfield "github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
jsonrpc "github.com/filecoin-project/go-jsonrpc"
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
@ -37,6 +38,7 @@ import (
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
types "github.com/filecoin-project/lotus/chain/types"
|
||||
ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes"
|
||||
alerting "github.com/filecoin-project/lotus/journal/alerting"
|
||||
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
imports "github.com/filecoin-project/lotus/node/repo/imports"
|
||||
@ -153,6 +155,20 @@ func (mr *MockFullNodeMockRecorder) ChainExport(arg0, arg1, arg2, arg3 interface
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExport", reflect.TypeOf((*MockFullNode)(nil).ChainExport), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// ChainExportRangeInternal mocks base method.
|
||||
func (m *MockFullNode) ChainExportRangeInternal(arg0 context.Context, arg1, arg2 types.TipSetKey, arg3 api.ChainExportConfig) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ChainExportRangeInternal", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ChainExportRangeInternal indicates an expected call of ChainExportRangeInternal.
|
||||
func (mr *MockFullNodeMockRecorder) ChainExportRangeInternal(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainExportRangeInternal", reflect.TypeOf((*MockFullNode)(nil).ChainExportRangeInternal), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// ChainGetBlock mocks base method.
|
||||
func (m *MockFullNode) ChainGetBlock(arg0 context.Context, arg1 cid.Cid) (*types.BlockHeader, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -183,6 +199,21 @@ func (mr *MockFullNodeMockRecorder) ChainGetBlockMessages(arg0, arg1 interface{}
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetBlockMessages", reflect.TypeOf((*MockFullNode)(nil).ChainGetBlockMessages), arg0, arg1)
|
||||
}
|
||||
|
||||
// ChainGetEvents mocks base method.
|
||||
func (m *MockFullNode) ChainGetEvents(arg0 context.Context, arg1 cid.Cid) ([]types.Event, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ChainGetEvents", arg0, arg1)
|
||||
ret0, _ := ret[0].([]types.Event)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ChainGetEvents indicates an expected call of ChainGetEvents.
|
||||
func (mr *MockFullNodeMockRecorder) ChainGetEvents(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetEvents", reflect.TypeOf((*MockFullNode)(nil).ChainGetEvents), arg0, arg1)
|
||||
}
|
||||
|
||||
// ChainGetGenesis mocks base method.
|
||||
func (m *MockFullNode) ChainGetGenesis(arg0 context.Context) (*types.TipSet, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -363,6 +394,20 @@ func (mr *MockFullNodeMockRecorder) ChainHead(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockFullNode)(nil).ChainHead), arg0)
|
||||
}
|
||||
|
||||
// ChainHotGC mocks base method.
|
||||
func (m *MockFullNode) ChainHotGC(arg0 context.Context, arg1 api.HotGCOpts) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ChainHotGC", arg0, arg1)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ChainHotGC indicates an expected call of ChainHotGC.
|
||||
func (mr *MockFullNodeMockRecorder) ChainHotGC(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHotGC", reflect.TypeOf((*MockFullNode)(nil).ChainHotGC), arg0, arg1)
|
||||
}
|
||||
|
||||
// ChainNotify mocks base method.
|
||||
func (m *MockFullNode) ChainNotify(arg0 context.Context) (<-chan []*api.HeadChange, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -921,6 +966,561 @@ func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0)
|
||||
}
|
||||
|
||||
// EthAccounts mocks base method.
|
||||
func (m *MockFullNode) EthAccounts(arg0 context.Context) ([]ethtypes.EthAddress, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthAccounts", arg0)
|
||||
ret0, _ := ret[0].([]ethtypes.EthAddress)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthAccounts indicates an expected call of EthAccounts.
|
||||
func (mr *MockFullNodeMockRecorder) EthAccounts(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthAccounts", reflect.TypeOf((*MockFullNode)(nil).EthAccounts), arg0)
|
||||
}
|
||||
|
||||
// EthAddressToFilecoinAddress mocks base method.
|
||||
func (m *MockFullNode) EthAddressToFilecoinAddress(arg0 context.Context, arg1 ethtypes.EthAddress) (address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthAddressToFilecoinAddress", arg0, arg1)
|
||||
ret0, _ := ret[0].(address.Address)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthAddressToFilecoinAddress indicates an expected call of EthAddressToFilecoinAddress.
|
||||
func (mr *MockFullNodeMockRecorder) EthAddressToFilecoinAddress(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthAddressToFilecoinAddress", reflect.TypeOf((*MockFullNode)(nil).EthAddressToFilecoinAddress), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthBlockNumber mocks base method.
|
||||
func (m *MockFullNode) EthBlockNumber(arg0 context.Context) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthBlockNumber", arg0)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthBlockNumber indicates an expected call of EthBlockNumber.
|
||||
func (mr *MockFullNodeMockRecorder) EthBlockNumber(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthBlockNumber", reflect.TypeOf((*MockFullNode)(nil).EthBlockNumber), arg0)
|
||||
}
|
||||
|
||||
// EthCall mocks base method.
|
||||
func (m *MockFullNode) EthCall(arg0 context.Context, arg1 ethtypes.EthCall, arg2 string) (ethtypes.EthBytes, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthCall", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthCall indicates an expected call of EthCall.
|
||||
func (mr *MockFullNodeMockRecorder) EthCall(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthCall", reflect.TypeOf((*MockFullNode)(nil).EthCall), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthChainId mocks base method.
|
||||
func (m *MockFullNode) EthChainId(arg0 context.Context) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthChainId", arg0)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthChainId indicates an expected call of EthChainId.
|
||||
func (mr *MockFullNodeMockRecorder) EthChainId(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthChainId", reflect.TypeOf((*MockFullNode)(nil).EthChainId), arg0)
|
||||
}
|
||||
|
||||
// EthEstimateGas mocks base method.
|
||||
func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthEstimateGas", arg0, arg1)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthEstimateGas indicates an expected call of EthEstimateGas.
|
||||
func (mr *MockFullNodeMockRecorder) EthEstimateGas(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthEstimateGas", reflect.TypeOf((*MockFullNode)(nil).EthEstimateGas), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthFeeHistory mocks base method.
|
||||
func (m *MockFullNode) EthFeeHistory(arg0 context.Context, arg1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthFeeHistory", arg0, arg1)
|
||||
ret0, _ := ret[0].(ethtypes.EthFeeHistory)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthFeeHistory indicates an expected call of EthFeeHistory.
|
||||
func (mr *MockFullNodeMockRecorder) EthFeeHistory(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthFeeHistory", reflect.TypeOf((*MockFullNode)(nil).EthFeeHistory), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGasPrice mocks base method.
|
||||
func (m *MockFullNode) EthGasPrice(arg0 context.Context) (ethtypes.EthBigInt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGasPrice", arg0)
|
||||
ret0, _ := ret[0].(ethtypes.EthBigInt)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGasPrice indicates an expected call of EthGasPrice.
|
||||
func (mr *MockFullNodeMockRecorder) EthGasPrice(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGasPrice", reflect.TypeOf((*MockFullNode)(nil).EthGasPrice), arg0)
|
||||
}
|
||||
|
||||
// EthGetBalance mocks base method.
|
||||
func (m *MockFullNode) EthGetBalance(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBigInt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetBalance", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthBigInt)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetBalance indicates an expected call of EthGetBalance.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetBalance(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetBalance", reflect.TypeOf((*MockFullNode)(nil).EthGetBalance), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetBlockByHash mocks base method.
|
||||
func (m *MockFullNode) EthGetBlockByHash(arg0 context.Context, arg1 ethtypes.EthHash, arg2 bool) (ethtypes.EthBlock, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetBlockByHash", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthBlock)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetBlockByHash indicates an expected call of EthGetBlockByHash.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetBlockByHash(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetBlockByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetBlockByHash), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetBlockByNumber mocks base method.
|
||||
func (m *MockFullNode) EthGetBlockByNumber(arg0 context.Context, arg1 string, arg2 bool) (ethtypes.EthBlock, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetBlockByNumber", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthBlock)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetBlockByNumber indicates an expected call of EthGetBlockByNumber.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetBlockByNumber(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetBlockByNumber", reflect.TypeOf((*MockFullNode)(nil).EthGetBlockByNumber), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetBlockTransactionCountByHash mocks base method.
|
||||
func (m *MockFullNode) EthGetBlockTransactionCountByHash(arg0 context.Context, arg1 ethtypes.EthHash) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetBlockTransactionCountByHash", arg0, arg1)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetBlockTransactionCountByHash indicates an expected call of EthGetBlockTransactionCountByHash.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetBlockTransactionCountByHash(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetBlockTransactionCountByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetBlockTransactionCountByHash), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetBlockTransactionCountByNumber mocks base method.
|
||||
func (m *MockFullNode) EthGetBlockTransactionCountByNumber(arg0 context.Context, arg1 ethtypes.EthUint64) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetBlockTransactionCountByNumber", arg0, arg1)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetBlockTransactionCountByNumber indicates an expected call of EthGetBlockTransactionCountByNumber.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetBlockTransactionCountByNumber(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetBlockTransactionCountByNumber", reflect.TypeOf((*MockFullNode)(nil).EthGetBlockTransactionCountByNumber), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetCode mocks base method.
|
||||
func (m *MockFullNode) EthGetCode(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBytes, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetCode", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetCode indicates an expected call of EthGetCode.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetCode(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetCode", reflect.TypeOf((*MockFullNode)(nil).EthGetCode), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetFilterChanges mocks base method.
|
||||
func (m *MockFullNode) EthGetFilterChanges(arg0 context.Context, arg1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetFilterChanges", arg0, arg1)
|
||||
ret0, _ := ret[0].(*ethtypes.EthFilterResult)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetFilterChanges indicates an expected call of EthGetFilterChanges.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetFilterChanges(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetFilterChanges", reflect.TypeOf((*MockFullNode)(nil).EthGetFilterChanges), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetFilterLogs mocks base method.
|
||||
func (m *MockFullNode) EthGetFilterLogs(arg0 context.Context, arg1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetFilterLogs", arg0, arg1)
|
||||
ret0, _ := ret[0].(*ethtypes.EthFilterResult)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetFilterLogs indicates an expected call of EthGetFilterLogs.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetFilterLogs(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetFilterLogs", reflect.TypeOf((*MockFullNode)(nil).EthGetFilterLogs), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetLogs mocks base method.
|
||||
func (m *MockFullNode) EthGetLogs(arg0 context.Context, arg1 *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetLogs", arg0, arg1)
|
||||
ret0, _ := ret[0].(*ethtypes.EthFilterResult)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetLogs indicates an expected call of EthGetLogs.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetLogs(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetLogs", reflect.TypeOf((*MockFullNode)(nil).EthGetLogs), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetMessageCidByTransactionHash mocks base method.
|
||||
func (m *MockFullNode) EthGetMessageCidByTransactionHash(arg0 context.Context, arg1 *ethtypes.EthHash) (*cid.Cid, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetMessageCidByTransactionHash", arg0, arg1)
|
||||
ret0, _ := ret[0].(*cid.Cid)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetMessageCidByTransactionHash indicates an expected call of EthGetMessageCidByTransactionHash.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetMessageCidByTransactionHash(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetMessageCidByTransactionHash", reflect.TypeOf((*MockFullNode)(nil).EthGetMessageCidByTransactionHash), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetStorageAt mocks base method.
|
||||
func (m *MockFullNode) EthGetStorageAt(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBytes, arg3 string) (ethtypes.EthBytes, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetStorageAt", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(ethtypes.EthBytes)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetStorageAt indicates an expected call of EthGetStorageAt.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetStorageAt(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetStorageAt", reflect.TypeOf((*MockFullNode)(nil).EthGetStorageAt), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// EthGetTransactionByBlockHashAndIndex mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionByBlockHashAndIndex(arg0 context.Context, arg1 ethtypes.EthHash, arg2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionByBlockHashAndIndex", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthTx)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionByBlockHashAndIndex indicates an expected call of EthGetTransactionByBlockHashAndIndex.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionByBlockHashAndIndex(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByBlockHashAndIndex", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByBlockHashAndIndex), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetTransactionByBlockNumberAndIndex mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionByBlockNumberAndIndex(arg0 context.Context, arg1, arg2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionByBlockNumberAndIndex", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthTx)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionByBlockNumberAndIndex indicates an expected call of EthGetTransactionByBlockNumberAndIndex.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionByBlockNumberAndIndex(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByBlockNumberAndIndex", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByBlockNumberAndIndex), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetTransactionByHash mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionByHash(arg0 context.Context, arg1 *ethtypes.EthHash) (*ethtypes.EthTx, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionByHash", arg0, arg1)
|
||||
ret0, _ := ret[0].(*ethtypes.EthTx)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionByHash indicates an expected call of EthGetTransactionByHash.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionByHash(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHash), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetTransactionByHashLimited mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionByHashLimited(arg0 context.Context, arg1 *ethtypes.EthHash, arg2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionByHashLimited", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*ethtypes.EthTx)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionByHashLimited indicates an expected call of EthGetTransactionByHashLimited.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHashLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHashLimited), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetTransactionCount mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionCount", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(ethtypes.EthUint64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionCount indicates an expected call of EthGetTransactionCount.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionCount(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionCount", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionCount), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthGetTransactionHashByCid mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionHashByCid(arg0 context.Context, arg1 cid.Cid) (*ethtypes.EthHash, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionHashByCid", arg0, arg1)
|
||||
ret0, _ := ret[0].(*ethtypes.EthHash)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionHashByCid indicates an expected call of EthGetTransactionHashByCid.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionHashByCid(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionHashByCid", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionHashByCid), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetTransactionReceipt mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionReceipt(arg0 context.Context, arg1 ethtypes.EthHash) (*api.EthTxReceipt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionReceipt", arg0, arg1)
|
||||
ret0, _ := ret[0].(*api.EthTxReceipt)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionReceipt indicates an expected call of EthGetTransactionReceipt.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionReceipt(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceipt", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceipt), arg0, arg1)
|
||||
}
|
||||
|
||||
// EthGetTransactionReceiptLimited mocks base method.
|
||||
func (m *MockFullNode) EthGetTransactionReceiptLimited(arg0 context.Context, arg1 ethtypes.EthHash, arg2 abi.ChainEpoch) (*api.EthTxReceipt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthGetTransactionReceiptLimited", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*api.EthTxReceipt)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthGetTransactionReceiptLimited indicates an expected call of EthGetTransactionReceiptLimited.
|
||||
func (mr *MockFullNodeMockRecorder) EthGetTransactionReceiptLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceiptLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceiptLimited), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// EthMaxPriorityFeePerGas mocks base method.
|
||||
func (m *MockFullNode) EthMaxPriorityFeePerGas(arg0 context.Context) (ethtypes.EthBigInt, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthMaxPriorityFeePerGas", arg0)
|
||||
ret0, _ := ret[0].(ethtypes.EthBigInt)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthMaxPriorityFeePerGas indicates an expected call of EthMaxPriorityFeePerGas.
|
||||
func (mr *MockFullNodeMockRecorder) EthMaxPriorityFeePerGas(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthMaxPriorityFeePerGas", reflect.TypeOf((*MockFullNode)(nil).EthMaxPriorityFeePerGas), arg0)
|
||||
}
|
||||
|
||||
// EthNewBlockFilter mocks base method.
|
||||
func (m *MockFullNode) EthNewBlockFilter(arg0 context.Context) (ethtypes.EthFilterID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "EthNewBlockFilter", arg0)
|
||||
ret0, _ := ret[0].(ethtypes.EthFilterID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// EthNewBlockFilter indicates an expected call of EthNewBlockFilter.
|
||||
func (mr *MockFullNodeMockRecorder) EthNewBlockFilter(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthNewBlockFilter", reflect.TypeOf((*MockFullNode)(nil).EthNewBlockFilter), arg0)
|
||||
}
|
||||
|
||||
// EthNewFilter mocks base method.
|
||||
func (m *MockFullNode) EthNewFilter(arg0 context.Context, arg1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) {
|
||||
m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthNewFilter", arg0, arg1)
    ret0, _ := ret[0].(ethtypes.EthFilterID)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthNewFilter indicates an expected call of EthNewFilter.
func (mr *MockFullNodeMockRecorder) EthNewFilter(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthNewFilter", reflect.TypeOf((*MockFullNode)(nil).EthNewFilter), arg0, arg1)
}

// EthNewPendingTransactionFilter mocks base method.
func (m *MockFullNode) EthNewPendingTransactionFilter(arg0 context.Context) (ethtypes.EthFilterID, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthNewPendingTransactionFilter", arg0)
    ret0, _ := ret[0].(ethtypes.EthFilterID)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthNewPendingTransactionFilter indicates an expected call of EthNewPendingTransactionFilter.
func (mr *MockFullNodeMockRecorder) EthNewPendingTransactionFilter(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthNewPendingTransactionFilter", reflect.TypeOf((*MockFullNode)(nil).EthNewPendingTransactionFilter), arg0)
}

// EthProtocolVersion mocks base method.
func (m *MockFullNode) EthProtocolVersion(arg0 context.Context) (ethtypes.EthUint64, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthProtocolVersion", arg0)
    ret0, _ := ret[0].(ethtypes.EthUint64)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthProtocolVersion indicates an expected call of EthProtocolVersion.
func (mr *MockFullNodeMockRecorder) EthProtocolVersion(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthProtocolVersion", reflect.TypeOf((*MockFullNode)(nil).EthProtocolVersion), arg0)
}

// EthSendRawTransaction mocks base method.
func (m *MockFullNode) EthSendRawTransaction(arg0 context.Context, arg1 ethtypes.EthBytes) (ethtypes.EthHash, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthSendRawTransaction", arg0, arg1)
    ret0, _ := ret[0].(ethtypes.EthHash)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthSendRawTransaction indicates an expected call of EthSendRawTransaction.
func (mr *MockFullNodeMockRecorder) EthSendRawTransaction(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSendRawTransaction", reflect.TypeOf((*MockFullNode)(nil).EthSendRawTransaction), arg0, arg1)
}

// EthSubscribe mocks base method.
func (m *MockFullNode) EthSubscribe(arg0 context.Context, arg1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthSubscribe", arg0, arg1)
    ret0, _ := ret[0].(ethtypes.EthSubscriptionID)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthSubscribe indicates an expected call of EthSubscribe.
func (mr *MockFullNodeMockRecorder) EthSubscribe(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSubscribe", reflect.TypeOf((*MockFullNode)(nil).EthSubscribe), arg0, arg1)
}

// EthUninstallFilter mocks base method.
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthUninstallFilter", arg0, arg1)
    ret0, _ := ret[0].(bool)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthUninstallFilter indicates an expected call of EthUninstallFilter.
func (mr *MockFullNodeMockRecorder) EthUninstallFilter(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthUninstallFilter", reflect.TypeOf((*MockFullNode)(nil).EthUninstallFilter), arg0, arg1)
}

// EthUnsubscribe mocks base method.
func (m *MockFullNode) EthUnsubscribe(arg0 context.Context, arg1 ethtypes.EthSubscriptionID) (bool, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthUnsubscribe", arg0, arg1)
    ret0, _ := ret[0].(bool)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthUnsubscribe indicates an expected call of EthUnsubscribe.
func (mr *MockFullNodeMockRecorder) EthUnsubscribe(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthUnsubscribe", reflect.TypeOf((*MockFullNode)(nil).EthUnsubscribe), arg0, arg1)
}

// FilecoinAddressToEthAddress mocks base method.
func (m *MockFullNode) FilecoinAddressToEthAddress(arg0 context.Context, arg1 address.Address) (ethtypes.EthAddress, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "FilecoinAddressToEthAddress", arg0, arg1)
    ret0, _ := ret[0].(ethtypes.EthAddress)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// FilecoinAddressToEthAddress indicates an expected call of FilecoinAddressToEthAddress.
func (mr *MockFullNodeMockRecorder) FilecoinAddressToEthAddress(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilecoinAddressToEthAddress", reflect.TypeOf((*MockFullNode)(nil).FilecoinAddressToEthAddress), arg0, arg1)
}

// GasEstimateFeeCap mocks base method.
func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) {
    m.ctrl.T.Helper()
@ -1843,6 +2443,21 @@ func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Cal
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1)
}

// NetListening mocks base method.
func (m *MockFullNode) NetListening(arg0 context.Context) (bool, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "NetListening", arg0)
    ret0, _ := ret[0].(bool)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// NetListening indicates an expected call of NetListening.
func (mr *MockFullNodeMockRecorder) NetListening(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetListening", reflect.TypeOf((*MockFullNode)(nil).NetListening), arg0)
}

// NetPeerInfo mocks base method.
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) {
    m.ctrl.T.Helper()
@ -1975,6 +2590,21 @@ func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1)
}

// NetVersion mocks base method.
func (m *MockFullNode) NetVersion(arg0 context.Context) (string, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "NetVersion", arg0)
    ret0, _ := ret[0].(string)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// NetVersion indicates an expected call of NetVersion.
func (mr *MockFullNodeMockRecorder) NetVersion(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetVersion", reflect.TypeOf((*MockFullNode)(nil).NetVersion), arg0)
}

// NodeStatus mocks base method.
func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) {
    m.ctrl.T.Helper()
@ -3555,3 +4185,18 @@ func (mr *MockFullNodeMockRecorder) WalletVerify(arg0, arg1, arg2, arg3 interfac
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalletVerify", reflect.TypeOf((*MockFullNode)(nil).WalletVerify), arg0, arg1, arg2, arg3)
}

// Web3ClientVersion mocks base method.
func (m *MockFullNode) Web3ClientVersion(arg0 context.Context) (string, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Web3ClientVersion", arg0)
    ret0, _ := ret[0].(string)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// Web3ClientVersion indicates an expected call of Web3ClientVersion.
func (mr *MockFullNodeMockRecorder) Web3ClientVersion(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Web3ClientVersion", reflect.TypeOf((*MockFullNode)(nil).Web3ClientVersion), arg0)
}
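The methods above are gomock-generated stubs for the new Eth APIs on FullNode. As a rough sketch of how such a mock is typically driven from a test (assuming the standard golang/mock controller, the api/mocks package path used by this repo, and placeholder filter arguments and return values):

package mocks_test

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"

    "github.com/filecoin-project/lotus/api/mocks"
    "github.com/filecoin-project/lotus/chain/types/ethtypes"
)

func TestEthNewFilterMock(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    full := mocks.NewMockFullNode(ctrl)

    // Expect exactly one EthNewFilter call and return a zero-valued filter ID.
    full.EXPECT().
        EthNewFilter(gomock.Any(), gomock.Any()).
        Return(ethtypes.EthFilterID{}, nil).
        Times(1)

    if _, err := full.EthNewFilter(context.Background(), nil); err != nil {
        t.Fatal(err)
    }
}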
2146
api/proxy_gen.go
File diff suppressed because it is too large
Load Diff
28
api/types.go
@ -8,13 +8,15 @@ import (
    "github.com/google/uuid"
    "github.com/ipfs/go-cid"
    "github.com/ipfs/go-graphsync"
    "github.com/ipld/go-ipld-prime"
    "github.com/ipld/go-ipld-prime/codec/dagjson"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    ma "github.com/multiformats/go-multiaddr"

    "github.com/filecoin-project/go-address"
    datatransfer "github.com/filecoin-project/go-data-transfer"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v9/miner"
@ -110,16 +112,12 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
        IsSender: channelState.Sender() == hostID,
        Message:  channelState.Message(),
    }
    stringer, ok := channelState.Voucher().(fmt.Stringer)
    if ok {
        channel.Voucher = stringer.String()
    } else {
        voucherJSON, err := json.Marshal(channelState.Voucher())
        if err != nil {
            channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
        } else {
            channel.Voucher = string(voucherJSON)
        }
    }
    voucher := channelState.Voucher()
    voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
    if err != nil {
        channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
    } else {
        channel.Voucher = string(voucherJSON)
    }
    if channel.IsSender {
        channel.IsInitiator = !channelState.IsPull()
@ -338,6 +336,7 @@ type ForkUpgradeParams struct {
    UpgradeOhSnapHeight abi.ChainEpoch
    UpgradeSkyrHeight   abi.ChainEpoch
    UpgradeSharkHeight  abi.ChainEpoch
    UpgradeHyggeHeight  abi.ChainEpoch
}

type NonceMapType map[address.Address]uint64
@ -397,3 +396,12 @@ func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
    }
    return nil
}

// ChainExportConfig holds configuration for chain ranged exports.
type ChainExportConfig struct {
    WriteBufferSize   int
    NumWorkers        int
    IncludeMessages   bool
    IncludeReceipts   bool
    IncludeStateRoots bool
}
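ChainExportConfig is new here; as a minimal sketch (the field values below are illustrative, not the defaults), a ranged export request might be configured like this:

cfg := api.ChainExportConfig{
    WriteBufferSize:   1 << 20, // buffered writes to the output CAR, in bytes
    NumWorkers:        8,       // parallel workers walking the chain
    IncludeMessages:   true,
    IncludeReceipts:   true,
    IncludeStateRoots: false,
}
_ = cfg // handed to the chain ranged-export implementation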
5
api/types/rpc.go
Normal file
@ -0,0 +1,5 @@
package apitypes

type Aliaser interface {
    AliasMethod(alias, original string)
}
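The Aliaser interface lets an RPC server register a second name for an already-registered method. A toy in-memory implementation and an illustrative registration (the alias and method names below are examples, not the canonical alias table):

type aliasMap struct {
    aliases map[string]string // alias -> original method name
}

func (a *aliasMap) AliasMethod(alias, original string) {
    if a.aliases == nil {
        a.aliases = map[string]string{}
    }
    a.aliases[alias] = original
}

func registerEthAliases(a Aliaser) {
    // Expose a Filecoin.* method under an Ethereum-style JSON-RPC name.
    a.AliasMethod("eth_blockNumber", "Filecoin.EthBlockNumber")
}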
@ -3,14 +3,14 @@ package v0api
import (
    "context"

    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
    blocks "github.com/ipfs/go-libipfs/blocks"
    textselector "github.com/ipld/go-ipld-selector-text-lite"
    "github.com/libp2p/go-libp2p/core/peer"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    datatransfer "github.com/filecoin-project/go-data-transfer"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
@ -141,7 +141,7 @@ type FullNode interface {

    // ChainGetPath returns a set of revert/apply operations needed to get from
    // one tipset to another, for example:
    //```
    // ```
    // to
    // ^
    // from tAA
@ -150,7 +150,7 @@ type FullNode interface {
    // ^---*--^
    // ^
    // tRR
    //```
    // ```
    // Would return `[revert(tBA), apply(tAB), apply(tAA)]`
    ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read

@ -367,12 +367,12 @@ type FullNode interface {
    ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write

    // ClientUnimport removes references to the specified file from filestore
    //ClientUnimport(path string)
    // ClientUnimport(path string)

    // ClientListImports lists imported files and their root CIDs
    ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write

    //ClientListAsks() []Ask
    // ClientListAsks() []Ask

    // MethodGroup: State
    // The State methods are used to query, inspect, and interact with chain state.
@ -641,14 +641,14 @@ type FullNode interface {
    // It takes the following params: <multisig address>, <start epoch>, <end epoch>
    MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read

    //MsigGetPending returns pending transactions for the given multisig
    //wallet. Once pending transactions are fully approved, they will no longer
    //appear here.
    // MsigGetPending returns pending transactions for the given multisig
    // wallet. Once pending transactions are fully approved, they will no longer
    // appear here.
    MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read

    // MsigCreate creates a multisig wallet
    // It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
    //<initial balance>, <sender address of the create msg>, <gas price>
    // <initial balance>, <sender address of the create msg>, <gas price>
    MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
    // MsigPropose proposes a multisig message
    // It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
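As a sketch of the parameter order the MsigCreate comment documents (node, ctx, the signer addresses and creator are assumed to be in scope; the amounts are placeholders):

msgCid, err := node.MsigCreate(
    ctx,
    2,                                      // required number of approvals
    []address.Address{addr1, addr2, addr3}, // approving addresses
    abi.ChainEpoch(0),                      // unlock duration
    types.NewInt(1_000_000),                // initial balance
    creator,                                // sender address of the create msg
    types.NewInt(0),                        // gas price
)
if err != nil {
    return err
}
fmt.Println("multisig create message:", msgCid)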
@ -3,8 +3,8 @@ package v0api
import (
    "context"

    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
    blocks "github.com/ipfs/go-libipfs/blocks"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
@ -14,6 +14,7 @@ import (

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
)

// MODIFYING THE API INTERFACE
@ -34,6 +35,9 @@ import (
// * Generate openrpc blobs

type Gateway interface {
    StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
    GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
    StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
    ChainHasObj(context.Context, cid.Cid) (bool, error)
    ChainPutObj(context.Context, blocks.Block) error
    ChainHead(ctx context.Context) (*types.TipSet, error)
@ -44,12 +48,15 @@ type Gateway interface {
    ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
    ChainReadObj(context.Context, cid.Cid) ([]byte, error)
    GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
    MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
    MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
    MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
    MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
    MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
    StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
    StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
    StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
    StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
    StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
    StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
    StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
@ -59,6 +66,7 @@ type Gateway interface {
    StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error)
    StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
    StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
    StateNetworkName(context.Context) (dtypes.NetworkName, error)
    StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
    StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
    StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
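Since this v0 Gateway interface is the restricted surface served by lotus-gateway, client code written against it works with either a gateway or a full node. A minimal sketch (printChainHeight and someAddr are illustrative names, not part of the API):

func printChainHeight(ctx context.Context, gw v0api.Gateway) error {
    head, err := gw.ChainHead(ctx)
    if err != nil {
        return err
    }
    fmt.Println("current height:", head.Height())

    // MpoolGetNonce, newly added to the gateway, lets light clients build messages.
    nonce, err := gw.MpoolGetNonce(ctx, someAddr)
    if err != nil {
        return err
    }
    fmt.Println("next nonce for", someAddr, "is", nonce)
    return nil
}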
@ -5,14 +5,14 @@ package v0api
|
||||
import (
|
||||
"context"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -39,383 +39,385 @@ type FullNodeStruct struct {
|
||||
|
||||
NetStruct
|
||||
|
||||
Internal struct {
|
||||
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
|
||||
Internal FullNodeMethods
|
||||
}
|
||||
|
||||
ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
|
||||
type FullNodeMethods struct {
|
||||
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
|
||||
|
||||
ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"`
|
||||
ChainDeleteObj func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
|
||||
|
||||
ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"`
|
||||
ChainExport func(p0 context.Context, p1 abi.ChainEpoch, p2 bool, p3 types.TipSetKey) (<-chan []byte, error) `perm:"read"`
|
||||
|
||||
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `perm:"read"`
|
||||
ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `perm:"read"`
|
||||
|
||||
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
|
||||
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `perm:"read"`
|
||||
|
||||
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
|
||||
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
|
||||
|
||||
ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"`
|
||||
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `perm:"read"`
|
||||
|
||||
ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"`
|
||||
ChainGetMessagesInTipset func(p0 context.Context, p1 types.TipSetKey) ([]api.Message, error) `perm:"read"`
|
||||
|
||||
ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"`
|
||||
ChainGetNode func(p0 context.Context, p1 string) (*api.IpldObject, error) `perm:"read"`
|
||||
|
||||
ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
|
||||
ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]api.Message, error) `perm:"read"`
|
||||
|
||||
ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
|
||||
ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
|
||||
|
||||
ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
|
||||
|
||||
ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
ChainGetRandomnessFromBeacon func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
ChainGetRandomnessFromTickets func(p0 context.Context, p1 types.TipSetKey, p2 crypto.DomainSeparationTag, p3 abi.ChainEpoch, p4 []byte) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
|
||||
ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"`
|
||||
ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) `perm:"read"`
|
||||
|
||||
ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
|
||||
ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"read"`
|
||||
|
||||
ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
|
||||
ChainHead func(p0 context.Context) (*types.TipSet, error) `perm:"read"`
|
||||
|
||||
ChainPutObj func(p0 context.Context, p1 blocks.Block) error ``
|
||||
ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
|
||||
|
||||
ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"`
|
||||
ChainPutObj func(p0 context.Context, p1 blocks.Block) error ``
|
||||
|
||||
ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
|
||||
ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"`
|
||||
|
||||
ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) `perm:"read"`
|
||||
ChainSetHead func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
|
||||
|
||||
ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
ChainStatObj func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (api.ObjStat, error) `perm:"read"`
|
||||
|
||||
ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"`
|
||||
ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"`
|
||||
|
||||
ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`
|
||||
ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
|
||||
ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
|
||||
ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`
|
||||
|
||||
ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"`
|
||||
ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
|
||||
|
||||
ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"`
|
||||
ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"`
|
||||
|
||||
ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
|
||||
ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"`
|
||||
|
||||
ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"`
|
||||
ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
|
||||
|
||||
ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"`
|
||||
ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"`
|
||||
|
||||
ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`
|
||||
ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"`
|
||||
|
||||
ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`
|
||||
ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`
|
||||
|
||||
ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`
|
||||
ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`
|
||||
|
||||
ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
|
||||
ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`
|
||||
|
||||
ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`
|
||||
ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
|
||||
|
||||
ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
|
||||
ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`
|
||||
|
||||
ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"`
|
||||
ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
|
||||
|
||||
ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`
|
||||
ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"`
|
||||
|
||||
ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`
|
||||
ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`
|
||||
|
||||
ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`
|
||||
ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`
|
||||
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
|
||||
ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`
|
||||
|
||||
ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
|
||||
|
||||
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
|
||||
|
||||
ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
|
||||
ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
|
||||
|
||||
ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
|
||||
ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
|
||||
|
||||
ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
|
||||
ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
|
||||
|
||||
ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
|
||||
ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
|
||||
|
||||
ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`
|
||||
ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
|
||||
|
||||
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
|
||||
ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`
|
||||
|
||||
GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
|
||||
|
||||
GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"`
|
||||
GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
GasEstimateGasLimit func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (int64, error) `perm:"read"`
|
||||
|
||||
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"`
|
||||
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"`
|
||||
|
||||
MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"`
|
||||
MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"`
|
||||
MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"`
|
||||
|
||||
MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketReleaseFunds func(p0 context.Context, p1 address.Address, p2 types.BigInt) error `perm:"sign"`
|
||||
|
||||
MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MarketReserveFunds func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MinerCreateBlock func(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
|
||||
MarketWithdraw func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
|
||||
MinerCreateBlock func(p0 context.Context, p1 *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
|
||||
|
||||
MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
|
||||
MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
|
||||
|
||||
MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"`
|
||||
MpoolBatchPush func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
|
||||
|
||||
MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
|
||||
MpoolBatchPushMessage func(p0 context.Context, p1 []*types.Message, p2 *api.MessageSendSpec) ([]*types.SignedMessage, error) `perm:"sign"`
|
||||
|
||||
MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"`
|
||||
MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
|
||||
|
||||
MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"`
|
||||
MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"`
|
||||
|
||||
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"`
|
||||
MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"`
|
||||
|
||||
MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
|
||||
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"read"`
|
||||
|
||||
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
|
||||
MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
|
||||
|
||||
MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
|
||||
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
|
||||
|
||||
MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
|
||||
MpoolPushMessage func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
|
||||
|
||||
MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"`
|
||||
MpoolPushUntrusted func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `perm:"write"`
|
||||
|
||||
MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"`
|
||||
MpoolSelect func(p0 context.Context, p1 types.TipSetKey, p2 float64) ([]*types.SignedMessage, error) `perm:"read"`
|
||||
|
||||
MpoolSub func(p0 context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
|
||||
MpoolSetConfig func(p0 context.Context, p1 *types.MpoolConfig) error `perm:"admin"`
|
||||
|
||||
MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"`
|
||||
MpoolSub func(p0 context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
|
||||
|
||||
MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"`
|
||||
MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
|
||||
MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) `perm:"read"`
|
||||
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) `perm:"read"`
|
||||
|
||||
MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
|
||||
MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MsigVesting, error) `perm:"read"`
|
||||
|
||||
MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
|
||||
MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"`
|
||||
MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
|
||||
PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"`
|
||||
|
||||
PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
|
||||
PaychAvailableFunds func(p0 context.Context, p1 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
|
||||
|
||||
PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
PaychAvailableFundsByFromTo func(p0 context.Context, p1 address.Address, p2 address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
|
||||
|
||||
PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
|
||||
PaychCollect func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
|
||||
PaychGet func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
|
||||
|
||||
PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"`
|
||||
PaychGetWaitReady func(p0 context.Context, p1 cid.Cid) (address.Address, error) `perm:"sign"`
|
||||
|
||||
PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
|
||||
PaychList func(p0 context.Context) ([]address.Address, error) `perm:"read"`
|
||||
|
||||
PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
PaychNewPayment func(p0 context.Context, p1 address.Address, p2 address.Address, p3 []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
|
||||
|
||||
PaychStatus func(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) `perm:"read"`
|
||||
PaychSettle func(p0 context.Context, p1 address.Address) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"`
|
||||
PaychStatus func(p0 context.Context, p1 address.Address) (*api.PaychStatus, error) `perm:"read"`
|
||||
|
||||
PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"`
|
||||
PaychVoucherAdd func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 types.BigInt) (types.BigInt, error) `perm:"write"`
|
||||
|
||||
PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"`
|
||||
PaychVoucherCheckSpendable func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (bool, error) `perm:"read"`
|
||||
|
||||
PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
|
||||
PaychVoucherCheckValid func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher) error `perm:"read"`
|
||||
|
||||
PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
|
||||
PaychVoucherCreate func(p0 context.Context, p1 address.Address, p2 types.BigInt, p3 uint64) (*api.VoucherCreateResult, error) `perm:"sign"`
|
||||
|
||||
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
PaychVoucherList func(p0 context.Context, p1 address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
|
||||
|
||||
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
|
||||
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
|
||||
StateActorManifestCID func(p0 context.Context, p1 abinetwork.Version) (cid.Cid, error) `perm:"read"`
|
||||
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
|
||||
|
||||
StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
|
||||
StateActorManifestCID func(p0 context.Context, p1 abinetwork.Version) (cid.Cid, error) `perm:"read"`
|
||||
|
||||
StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
|
||||
StateAllMinerFaults func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
|
||||
|
||||
StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"`
|
||||
StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
|
||||
|
||||
StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"`
|
||||
StateChangedActors func(p0 context.Context, p1 cid.Cid, p2 cid.Cid) (map[string]types.Actor, error) `perm:"read"`
|
||||
|
||||
StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
|
||||
StateCirculatingSupply func(p0 context.Context, p1 types.TipSetKey) (abi.TokenAmount, error) `perm:"read"`
|
||||
|
||||
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
|
||||
StateCompute func(p0 context.Context, p1 abi.ChainEpoch, p2 []*types.Message, p3 types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
|
||||
|
||||
StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"`
|
||||
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) `perm:"read"`
|
||||
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) `perm:"read"`
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetNetworkParams func(p0 context.Context) (*api.NetworkParams, error) `perm:"read"`
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
StateGetNetworkParams func(p0 context.Context) (*api.NetworkParams, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
|
||||
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
|
||||
|
||||
StateListMessages func(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
|
||||
StateListActors func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
|
||||
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
StateListMessages func(p0 context.Context, p1 *api.MessageMatch, p2 types.TipSetKey, p3 abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
|
||||
|
||||
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) `perm:"read"`
|
||||
|
||||
StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
|
||||
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
|
||||
StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]*api.MarketDeal, error) `perm:"read"`
|
||||
StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
|
||||
|
||||
StateMarketParticipants func(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"`
|
||||
StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]*api.MarketDeal, error) `perm:"read"`
|
||||
|
||||
StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) `perm:"read"`
|
||||
StateMarketParticipants func(p0 context.Context, p1 types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"`
|
||||
|
||||
StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
|
||||
StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) `perm:"read"`
|
||||
|
||||
StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
StateMinerActiveSectors func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
|
||||
|
||||
StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) `perm:"read"`
|
||||
StateMinerAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
|
||||
StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]api.Deadline, error) `perm:"read"`
|
||||
|
||||
StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
|
||||
StateMinerFaults func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
|
||||
|
||||
StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerInfo, error) `perm:"read"`
|
||||
|
||||
StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) `perm:"read"`
|
||||
StateMinerInitialPledgeCollateral func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
|
||||
StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
|
||||
StateMinerPartitions func(p0 context.Context, p1 address.Address, p2 uint64, p3 types.TipSetKey) ([]api.Partition, error) `perm:"read"`
|
||||
|
||||
StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
|
||||
StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
|
||||
|
||||
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"`
|
||||
StateMinerPreCommitDepositForPower func(p0 context.Context, p1 address.Address, p2 miner.SectorPreCommitInfo, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `perm:"read"`
StateMinerRecoveries func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (bitfield.BitField, error) `perm:"read"`
StateMinerSectorAllocated func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (bool, error) `perm:"read"`
StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
StateMinerSectors func(p0 context.Context, p1 address.Address, p2 *bitfield.BitField, p3 types.TipSetKey) ([]*miner.SectorOnChainInfo, error) `perm:"read"`
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) `perm:"read"`
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) `perm:"read"`
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.ActorState, error) `perm:"read"`
StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) `perm:"read"`
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `perm:"read"`
StateSearchMsgLimited func(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"`
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"`
StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
StateVMCirculatingSupplyInternal func(p0 context.Context, p1 types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
StateVerifiedRegistryRootKey func(p0 context.Context, p1 types.TipSetKey) (address.Address, error) `perm:"read"`
StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `perm:"read"`
StateWaitMsgLimited func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"`
SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
SyncIncomingBlocks func(p0 context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
SyncMarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
SyncState func(p0 context.Context) (*api.SyncState, error) `perm:"read"`
SyncSubmitBlock func(p0 context.Context, p1 *types.BlockMsg) error `perm:"write"`
SyncUnmarkAllBad func(p0 context.Context) error `perm:"admin"`
SyncUnmarkBad func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`
SyncValidateTipset func(p0 context.Context, p1 types.TipSetKey) (bool, error) `perm:"read"`
WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"read"`
WalletDefaultAddress func(p0 context.Context) (address.Address, error) `perm:"write"`
WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"write"`
WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
WalletList func(p0 context.Context) ([]address.Address, error) `perm:"write"`
WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"write"`
WalletSetDefault func(p0 context.Context, p1 address.Address) error `perm:"write"`
WalletSign func(p0 context.Context, p1 address.Address, p2 []byte) (*crypto.Signature, error) `perm:"sign"`
WalletSignMessage func(p0 context.Context, p1 address.Address, p2 *types.Message) (*types.SignedMessage, error) `perm:"sign"`
WalletValidateAddress func(p0 context.Context, p1 string) (address.Address, error) `perm:"read"`
WalletVerify func(p0 context.Context, p1 address.Address, p2 []byte, p3 *crypto.Signature) (bool, error) `perm:"read"`
}
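// The perm:"read"/"write"/"sign"/"admin" tags on the generated func fields above are what
// the RPC layer uses to gate each method per auth token (the auth package imported later in
// this diff, github.com/filecoin-project/go-jsonrpc/auth, consumes them). A standalone
// illustration, not lotus code, of how such tags can be collected with reflection into a
// method-to-permission map; the field set here is made up:
package permdemo

import (
	"reflect"
)

type methods struct {
	WalletBalance func() error `perm:"read"`
	WalletSign    func() error `perm:"sign"`
	SyncMarkBad   func() error `perm:"admin"`
}

// permissions walks the struct tags and records the declared permission for each method.
func permissions(v interface{}) map[string]string {
	out := map[string]string{}
	t := reflect.TypeOf(v)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if perm, ok := f.Tag.Lookup("perm"); ok {
			out[f.Name] = perm
		}
	}
	return out
}

// permissions(methods{}) => map[SyncMarkBad:admin WalletBalance:read WalletSign:sign]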
type FullNodeStub struct {

@ -425,71 +427,87 @@ type FullNodeStub struct {

}
type GatewayStruct struct {
Internal GatewayMethods
}

type GatewayMethods struct {
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) ``
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) ``
ChainGetTipSetByHeight func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (*types.TipSet, error) ``
ChainHasObj func(p0 context.Context, p1 cid.Cid) (bool, error) ``
ChainHead func(p0 context.Context) (*types.TipSet, error) ``
ChainNotify func(p0 context.Context) (<-chan []*api.HeadChange, error) ``
ChainPutObj func(p0 context.Context, p1 blocks.Block) error ``
ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) ``
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
MsigGetPending func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]*api.MsigTransaction, error) ``
MsigGetVested func(p0 context.Context, p1 address.Address, p2 types.TipSetKey, p3 types.TipSetKey) (types.BigInt, error) ``
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) ``
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) ``
StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) ``
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MarketBalance, error) ``
StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*api.MarketDeal, error) ``
StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerInfo, error) ``
StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*api.MinerPower, error) ``
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateMinerSectorCount func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) ``
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``
StateReplay func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) ``
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) ``
Version func(p0 context.Context) (api.APIVersion, error) ``
WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
}

type GatewayStub struct {
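// Pulling the method table out into GatewayMethods keeps the generated call sites
// nil-checked: each GatewayStruct wrapper returns ErrNotSupported when its Internal
// func field is not populated, so a partially wired gateway degrades gracefully.
// A standalone sketch of that dispatch pattern (illustrative types, not lotus code):
package gatewaydemo

import (
	"context"
	"errors"
)

var ErrNotSupported = errors.New("method not supported")

// Methods mirrors the shape of the generated GatewayMethods: one func field per RPC.
type Methods struct {
	ChainHead   func(ctx context.Context) (string, error)
	StateReplay func(ctx context.Context, msgCid string) (string, error)
}

// Struct mirrors GatewayStruct: calls dispatch through Internal after a nil check.
type Struct struct {
	Internal Methods
}

func (s *Struct) ChainHead(ctx context.Context) (string, error) {
	if s.Internal.ChainHead == nil {
		return "", ErrNotSupported
	}
	return s.Internal.ChainHead(ctx)
}

func (s *Struct) StateReplay(ctx context.Context, msgCid string) (string, error) {
	if s.Internal.StateReplay == nil {
		return "", ErrNotSupported
	}
	return s.Internal.StateReplay(ctx, msgCid)
}

// Anything that was not hooked up fails with ErrNotSupported instead of panicking:
//
//	gw := &Struct{}
//	gw.Internal.ChainHead = func(ctx context.Context) (string, error) { return "tipset@123", nil }
//	_, err := gw.StateReplay(context.Background(), "bafy...") // err == ErrNotSupported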
@ -2662,6 +2680,17 @@ func (s *GatewayStub) ChainReadObj(p0 context.Context, p1 cid.Cid) ([]byte, erro
|
||||
return *new([]byte), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
|
||||
if s.Internal.GasEstimateGasPremium == nil {
|
||||
return *new(types.BigInt), ErrNotSupported
|
||||
}
|
||||
return s.Internal.GasEstimateGasPremium(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
|
||||
return *new(types.BigInt), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) GasEstimateMessageGas(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) {
|
||||
if s.Internal.GasEstimateMessageGas == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -2673,6 +2702,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
||||
if s.Internal.MpoolGetNonce == nil {
|
||||
return 0, ErrNotSupported
|
||||
}
|
||||
return s.Internal.MpoolGetNonce(p0, p1)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
|
||||
return 0, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
|
||||
if s.Internal.MpoolPush == nil {
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
@ -2728,6 +2768,17 @@ func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2
|
||||
return *new(address.Address), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
|
||||
if s.Internal.StateCall == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateCall(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
|
||||
if s.Internal.StateDealProviderCollateralBounds == nil {
|
||||
return *new(api.DealCollateralBounds), ErrNotSupported
|
||||
@ -2739,6 +2790,17 @@ func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 a
|
||||
return *new(api.DealCollateralBounds), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
|
||||
if s.Internal.StateDecodeParams == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
|
||||
if s.Internal.StateGetActor == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -2838,6 +2900,28 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
|
||||
if s.Internal.StateMinerSectorCount == nil {
|
||||
return *new(api.MinerSectors), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateMinerSectorCount(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateMinerSectorCount(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (api.MinerSectors, error) {
|
||||
return *new(api.MinerSectors), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
|
||||
if s.Internal.StateNetworkName == nil {
|
||||
return *new(dtypes.NetworkName), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateNetworkName(p0)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
|
||||
return *new(dtypes.NetworkName), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
|
||||
if s.Internal.StateNetworkVersion == nil {
|
||||
return *new(abinetwork.Version), ErrNotSupported
|
||||
@ -2849,6 +2933,17 @@ func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey
|
||||
return *new(abinetwork.Version), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
|
||||
if s.Internal.StateReplay == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateReplay(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateReplay(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid) (*api.InvocResult, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) {
|
||||
if s.Internal.StateSearchMsg == nil {
|
||||
return nil, ErrNotSupported
|
||||
|
@ -11,8 +11,8 @@ import (
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
uuid "github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
metrics "github.com/libp2p/go-libp2p/core/metrics"
|
||||
network0 "github.com/libp2p/go-libp2p/core/network"
|
||||
peer "github.com/libp2p/go-libp2p/core/peer"
|
||||
@ -20,7 +20,7 @@ import (
|
||||
|
||||
address "github.com/filecoin-project/go-address"
|
||||
bitfield "github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
|
||||
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
|
@ -3,8 +3,8 @@ package blockstore
|
||||
import (
|
||||
"context"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
block "github.com/ipfs/go-libipfs/blocks"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
|
@ -13,13 +13,14 @@ import (
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/dgraph-io/badger/v2/options"
|
||||
"github.com/dgraph-io/badger/v2/pb"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
logger "github.com/ipfs/go-log/v2"
|
||||
pool "github.com/libp2p/go-buffer-pool"
|
||||
"github.com/multiformats/go-base32"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
)
|
||||
@ -44,7 +45,8 @@ const (
|
||||
// MemoryMap is equivalent to badger/options.MemoryMap.
|
||||
MemoryMap = options.MemoryMap
|
||||
// LoadToRAM is equivalent to badger/options.LoadToRAM.
|
||||
LoadToRAM = options.LoadToRAM
|
||||
LoadToRAM = options.LoadToRAM
|
||||
defaultGCThreshold = 0.125
|
||||
)
|
||||
|
||||
// Options embeds the badger options themselves, and augments them with
|
||||
@ -439,7 +441,7 @@ func (b *Blockstore) deleteDB(path string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Blockstore) onlineGC() error {
func (b *Blockstore) onlineGC(ctx context.Context, threshold float64) error {
b.lockDB()
defer b.unlockDB()

@ -448,6 +450,9 @@ func (b *Blockstore) onlineGC() error {
if nworkers < 2 {
nworkers = 2
}
if nworkers > 7 { // max out at 1 goroutine per badger level
nworkers = 7
}

err := b.db.Flatten(nworkers)
if err != nil {
@ -455,7 +460,12 @@ func (b *Blockstore) onlineGC() error {
}

for err == nil {
err = b.db.RunValueLogGC(0.125)
select {
case <-ctx.Done():
err = ctx.Err()
default:
err = b.db.RunValueLogGC(threshold)
}
}

if err == badger.ErrNoRewrite {
@ -468,7 +478,7 @@ func (b *Blockstore) onlineGC() error {

// CollectGarbage compacts and runs garbage collection on the value log;
// implements the BlockstoreGC trait
func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error {
func (b *Blockstore) CollectGarbage(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error {
if err := b.access(); err != nil {
return err
}
@ -485,8 +495,48 @@ func (b *Blockstore) CollectGarbage(opts ...blockstore.BlockstoreGCOption) error
if options.FullGC {
return b.movingGC()
}
threshold := options.Threshold
if threshold == 0 {
threshold = defaultGCThreshold
}
return b.onlineGC(ctx, threshold)
}

return b.onlineGC()
// GCOnce runs garbage collection on the value log;
// implements BlockstoreGCOnce trait
func (b *Blockstore) GCOnce(ctx context.Context, opts ...blockstore.BlockstoreGCOption) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()

var options blockstore.BlockstoreGCOptions
for _, opt := range opts {
err := opt(&options)
if err != nil {
return err
}
}
if options.FullGC {
return xerrors.Errorf("FullGC option specified for GCOnce but full GC is non incremental")
}

threshold := options.Threshold
if threshold == 0 {
threshold = defaultGCThreshold
}

b.lockDB()
defer b.unlockDB()

// Note no compaction needed before single GC as we will hit at most one vlog anyway
err := b.db.RunValueLogGC(threshold)
if err == badger.ErrNoRewrite {
// not really an error in this case, it signals the end of GC
return nil
}

return err
}
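// Hedged usage sketch for the new entry points above: a caller can run one bounded GC
// round on a timer instead of the long-running CollectGarbage loop, cancelling via
// context. The interval and the 0.01 threshold are illustrative, not values taken from
// this commit.
package gcdriver

import (
	"context"
	"log"
	"time"

	"github.com/filecoin-project/lotus/blockstore"
)

func runPeriodicGC(ctx context.Context, bs blockstore.BlockstoreGCOnce) {
	ticker := time.NewTicker(10 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// GCOnce reclaims at most one value-log file per call and treats
			// badger.ErrNoRewrite ("nothing left to collect") as success.
			if err := bs.GCOnce(ctx, blockstore.WithThreshold(0.01)); err != nil {
				log.Printf("online GC round failed: %s", err)
			}
		}
	}
}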
// Size returns the aggregate size of the blockstore
@ -551,6 +601,18 @@ func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) erro
})
}

func (b *Blockstore) Flush(context.Context) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()

b.lockDB()
defer b.unlockDB()

return b.db.Sync()
}

// Has implements Blockstore.Has.
func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
if err := b.access(); err != nil {
|
@ -10,8 +10,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
@ -145,7 +145,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
|
||||
return nil
|
||||
})
|
||||
g.Go(func() error {
|
||||
return db.CollectGarbage(blockstore.WithFullGC(true))
|
||||
return db.CollectGarbage(ctx, blockstore.WithFullGC(true))
|
||||
})
|
||||
|
||||
err = g.Wait()
|
||||
@ -230,7 +230,7 @@ func testMove(t *testing.T, optsF func(string) Options) {
|
||||
checkPath()
|
||||
|
||||
// now do another FullGC to test the double move and following of symlinks
|
||||
if err := db.CollectGarbage(blockstore.WithFullGC(true)); err != nil {
|
||||
if err := db.CollectGarbage(ctx, blockstore.WithFullGC(true)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -9,10 +9,10 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
u "github.com/ipfs/go-ipfs-util"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
|
@ -18,6 +18,7 @@ type Blockstore interface {
blockstore.Blockstore
blockstore.Viewer
BatchDeleter
Flusher
}

// BasicBlockstore is an alias to the original IPFS Blockstore.
@ -25,6 +26,10 @@ type BasicBlockstore = blockstore.Blockstore

type Viewer = blockstore.Viewer

type Flusher interface {
Flush(context.Context) error
}

type BatchDeleter interface {
DeleteMany(ctx context.Context, cids []cid.Cid) error
}
@ -36,7 +41,12 @@ type BlockstoreIterator interface {

// BlockstoreGC is a trait for blockstores that support online garbage collection
type BlockstoreGC interface {
CollectGarbage(options ...BlockstoreGCOption) error
CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error
}

// BlockstoreGCOnce is a trait for a blockstore that supports incremental online garbage collection
type BlockstoreGCOnce interface {
GCOnce(ctx context.Context, options ...BlockstoreGCOption) error
}

// BlockstoreGCOption is a functional interface for controlling blockstore GC options
@ -45,6 +55,8 @@ type BlockstoreGCOption = func(*BlockstoreGCOptions) error
// BlockstoreGCOptions is a struct with GC options
type BlockstoreGCOptions struct {
FullGC bool
// fraction of garbage in a badger vlog before it's worth processing in online GC
Threshold float64
}

func WithFullGC(fullgc bool) BlockstoreGCOption {
@ -54,6 +66,13 @@ func WithFullGC(fullgc bool) BlockstoreGCOption {
}
}

func WithThreshold(threshold float64) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.Threshold = threshold
return nil
}
}
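// Hedged sketch of composing the functional options above when driving a store that
// implements BlockstoreGC; the 0.02 threshold and the moving/online split are
// illustrative, not values from this commit.
package gcopts

import (
	"context"

	"github.com/filecoin-project/lotus/blockstore"
)

func collect(ctx context.Context, bs blockstore.BlockstoreGC, moving bool) error {
	if moving {
		// full (moving) GC rewrites live data into a fresh location
		return bs.CollectGarbage(ctx, blockstore.WithFullGC(true))
	}
	// online GC only touches value-log files whose garbage fraction exceeds the threshold
	return bs.CollectGarbage(ctx, blockstore.WithThreshold(0.02))
}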
// BlockstoreSize is a trait for on-disk blockstores that can report their size
type BlockstoreSize interface {
Size() (int64, error)
@ -92,6 +111,13 @@ type adaptedBlockstore struct {

var _ Blockstore = (*adaptedBlockstore)(nil)

func (a *adaptedBlockstore) Flush(ctx context.Context) error {
if flusher, canFlush := a.Blockstore.(Flusher); canFlush {
return flusher.Flush(ctx)
}
return nil
}

func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := a.Get(ctx, cid)
if err != nil {
@ -4,9 +4,9 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
block "github.com/ipfs/go-libipfs/blocks"
|
||||
)
|
||||
|
||||
// buflog is a logger for the buffered blockstore. It is subscoped from the
|
||||
@ -46,6 +46,8 @@ var (
|
||||
_ Viewer = (*BufferedBlockstore)(nil)
|
||||
)
|
||||
|
||||
func (bs *BufferedBlockstore) Flush(ctx context.Context) error { return bs.write.Flush(ctx) }
|
||||
|
||||
func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
||||
a, err := bs.read.AllKeysChan(ctx)
|
||||
if err != nil {
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
)
|
||||
|
||||
var _ Blockstore = (*discardstore)(nil)
|
||||
@ -38,6 +38,10 @@ func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) err
|
||||
return b.bs.View(ctx, cid, f)
|
||||
}
|
||||
|
||||
func (b *discardstore) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error {
|
||||
return nil
|
||||
}
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
@ -179,3 +179,7 @@ func (b *idstore) Close() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *idstore) Flush(ctx context.Context) error {
|
||||
return b.bs.Flush(ctx)
|
||||
}
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
httpapi "github.com/ipfs/go-ipfs-http-client"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
iface "github.com/ipfs/interface-go-ipfs-core"
|
||||
"github.com/ipfs/interface-go-ipfs-core/options"
|
||||
"github.com/ipfs/interface-go-ipfs-core/path"
|
||||
|
@ -3,9 +3,9 @@ package blockstore
|
||||
import (
|
||||
"context"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
)
|
||||
|
||||
// NewMemory returns a temporary memory-backed blockstore.
|
||||
@ -17,6 +17,8 @@ func NewMemory() MemBlockstore {
|
||||
// To match behavior of badger blockstore we index by multihash only.
|
||||
type MemBlockstore map[string]blocks.Block
|
||||
|
||||
func (MemBlockstore) Flush(context.Context) error { return nil }
|
||||
|
||||
func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
|
||||
delete(m, string(k.Hash()))
|
||||
return nil
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
@ -410,6 +410,8 @@ func (n *NetworkStore) HashOnRead(enabled bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*NetworkStore) Flush(context.Context) error { return nil }
|
||||
|
||||
func (n *NetworkStore) Stop(ctx context.Context) error {
|
||||
close(n.closing)
|
||||
|
||||
|
@ -5,9 +5,9 @@ import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
block "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
block "github.com/ipfs/go-libipfs/blocks"
|
||||
"github.com/libp2p/go-msgio"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -12,8 +12,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
"go.uber.org/multierr"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
@ -11,6 +11,8 @@ import (
"github.com/ipfs/go-cid"
"go.uber.org/zap"
"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/system"
)

type BadgerMarkSetEnv struct {
@ -349,7 +351,7 @@ func (s *BadgerMarkSet) write(seqno int) (err error) {
persist := s.persist
s.mx.RUnlock()

if persist {
if persist && !system.BadgerFsyncDisable { // WARNING: disabling sync makes recovery from crash during critical section unsound
return s.db.Sync()
}
@ -8,10 +8,10 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"go.opencensus.io/stats"
|
||||
"go.uber.org/multierr"
|
||||
@ -115,6 +115,23 @@ type Config struct {
// A positive value is the number of compactions before a full GC is performed;
// a value of 1 will perform full GC in every compaction.
HotStoreFullGCFrequency uint64

// HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take.
// This is not a hard limit; the hotstore can exceed the target, for example if
// state grows massively between compactions. The splitstore will make a best
// effort to avoid overflowing the target and in practice should never overflow.
// This field is used when doing GC at the end of a compaction to adaptively
// choose moving GC.
HotstoreMaxSpaceTarget uint64

// Moving GC will be triggered when total moving size exceeds
// HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
HotstoreMaxSpaceThreshold uint64

// Safety buffer to prevent moving GC from overflowing disk.
// Moving GC will not occur when total moving size exceeds
// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
HotstoreMaxSpaceSafetyBuffer uint64
}
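// The three Hotstore* fields interact as described in the comments above. A hedged
// sketch of the resulting decision rule with a local stand-in type and made-up sizes;
// the actual logic lives in the gc code added by this change and is not shown in full:
package splitstoredemo

type spaceCfg struct {
	HotstoreMaxSpaceTarget       uint64
	HotstoreMaxSpaceThreshold    uint64
	HotstoreMaxSpaceSafetyBuffer uint64
}

// wantMovingGC mirrors the field comments: moving GC becomes desirable once the
// projected moving size crosses (Target - Threshold), but is refused once it would
// cross (Target - SafetyBuffer).
func wantMovingGC(cfg spaceCfg, totalMovingSize uint64) bool {
	should := totalMovingSize > cfg.HotstoreMaxSpaceTarget-cfg.HotstoreMaxSpaceThreshold
	can := totalMovingSize <= cfg.HotstoreMaxSpaceTarget-cfg.HotstoreMaxSpaceSafetyBuffer
	return should && can
}

// Example: with Target 650 GiB, Threshold 150 GiB and SafetyBuffer 50 GiB, moving GC
// kicks in once the moving size exceeds 500 GiB and is refused beyond 600 GiB.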
// ChainAccessor allows the Splitstore to access the chain. It will most likely
|
||||
@ -165,6 +182,7 @@ type SplitStore struct {
|
||||
|
||||
compactionIndex int64
|
||||
pruneIndex int64
|
||||
onlineGCCnt int64
|
||||
|
||||
ctx context.Context
|
||||
cancel func()
|
||||
@ -195,6 +213,17 @@ type SplitStore struct {
|
||||
|
||||
// registered protectors
|
||||
protectors []func(func(cid.Cid) error) error
|
||||
|
||||
// dag sizes measured during latest compaction
|
||||
// logged and used for GC strategy
|
||||
|
||||
// protected by compaction lock
|
||||
szWalk int64
|
||||
szProtectedTxns int64
|
||||
szKeys int64 // approximate, not counting keys protected when entering critical section
|
||||
|
||||
// protected by txnLk
|
||||
szMarkedLiveRefs int64
|
||||
}
|
||||
|
||||
var _ bstore.Blockstore = (*SplitStore)(nil)
|
||||
@ -447,6 +476,23 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SplitStore) Flush(ctx context.Context) error {
|
||||
s.txnLk.RLock()
|
||||
defer s.txnLk.RUnlock()
|
||||
|
||||
if err := s.cold.Flush(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.hot.Flush(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.ds.Sync(ctx, dstore.Key{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
|
||||
if isIdentiyCid(blk.Cid()) {
|
||||
return nil
|
||||
|
@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
||||
}
|
||||
defer visitor.Close() //nolint
|
||||
|
||||
err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
|
||||
size := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
return errStopWalk
|
||||
@ -133,7 +133,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
|
||||
log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt, "walk size", size)
|
||||
write("--")
|
||||
write("cold: %d missing: %d", *coldCnt, *missingCnt)
|
||||
write("DONE")
|
||||
|
@ -10,9 +10,9 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
blocks "github.com/ipfs/go-libipfs/blocks"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.opencensus.io/stats"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@ -66,7 +66,8 @@ var (
|
||||
)
|
||||
|
||||
const (
|
||||
batchSize = 16384
|
||||
batchSize = 16384
|
||||
cidKeySize = 128
|
||||
)
|
||||
|
||||
func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
||||
@ -115,8 +116,6 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prioritize hot store compaction over cold store prune
|
||||
|
||||
if epoch-s.baseEpoch > CompactionThreshold {
|
||||
// it's time to compact -- prepare the transaction and go!
|
||||
s.beginTxnProtect()
|
||||
@ -176,6 +175,8 @@ func (s *SplitStore) protectTipSets(apply []*types.TipSet) {
|
||||
timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
|
||||
doSync := time.Since(timestamp) < SyncWaitTime
|
||||
go func() {
|
||||
// we are holding the txnLk while marking
|
||||
// so critical section cannot delete
|
||||
if doSync {
|
||||
defer func() {
|
||||
s.txnSyncMx.Lock()
|
||||
@ -199,9 +200,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
|
||||
log.Debugf("marking %d live refs", len(cids))
|
||||
startMark := time.Now()
|
||||
|
||||
szMarked := new(int64)
|
||||
|
||||
count := new(int32)
|
||||
visitor := newConcurrentVisitor()
|
||||
walkObject := func(c cid.Cid) error {
|
||||
walkObject := func(c cid.Cid) (int64, error) {
|
||||
return s.walkObjectIncomplete(c, visitor,
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
@ -228,10 +231,12 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
|
||||
|
||||
// optimize the common case of single put
|
||||
if len(cids) == 1 {
|
||||
if err := walkObject(cids[0]); err != nil {
|
||||
sz, err := walkObject(cids[0])
|
||||
if err != nil {
|
||||
log.Errorf("error marking tipset refs: %s", err)
|
||||
}
|
||||
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
|
||||
atomic.AddInt64(szMarked, sz)
|
||||
return
|
||||
}
|
||||
|
||||
@ -243,9 +248,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
|
||||
|
||||
worker := func() error {
|
||||
for c := range workch {
|
||||
if err := walkObject(c); err != nil {
|
||||
sz, err := walkObject(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.AddInt64(szMarked, sz)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -268,7 +275,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
|
||||
log.Errorf("error marking tipset refs: %s", err)
|
||||
}
|
||||
|
||||
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
|
||||
log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked)
|
||||
s.szMarkedLiveRefs += atomic.LoadInt64(szMarked)
|
||||
}
|
||||
|
||||
// transactionally protect a view
|
||||
@ -361,6 +369,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
|
||||
|
||||
log.Infow("protecting transactional references", "refs", len(txnRefs))
|
||||
count := 0
|
||||
sz := new(int64)
|
||||
workch := make(chan cid.Cid, len(txnRefs))
|
||||
startProtect := time.Now()
|
||||
|
||||
@ -393,10 +402,11 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
|
||||
|
||||
worker := func() error {
|
||||
for c := range workch {
|
||||
err := s.doTxnProtect(c, markSet)
|
||||
szTxn, err := s.doTxnProtect(c, markSet)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error protecting transactional references to %s: %w", c, err)
|
||||
}
|
||||
atomic.AddInt64(sz, szTxn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -409,16 +419,16 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
|
||||
if err := g.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count)
|
||||
s.szProtectedTxns += atomic.LoadInt64(sz)
|
||||
log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz)
|
||||
}
|
||||
}
|
||||
|
||||
// transactionally protect a reference by walking the object and marking.
|
||||
// concurrent markings are short circuited by checking the markset.
|
||||
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
|
||||
func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) {
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Note: cold objects are deleted heaviest first, so the constituents of an object
|
||||
@ -442,7 +452,7 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
|
||||
},
|
||||
func(c cid.Cid) error {
|
||||
if s.txnMissing != nil {
|
||||
log.Warnf("missing object reference %s in %s", c, root)
|
||||
log.Debugf("missing object reference %s in %s", c, root)
|
||||
s.txnRefsMx.Lock()
|
||||
s.txnMissing[c] = struct{}{}
|
||||
s.txnRefsMx.Unlock()
|
||||
@ -509,6 +519,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
// might be potentially inconsistent; abort compaction and notify the user to intervene.
|
||||
return xerrors.Errorf("checkpoint exists; aborting compaction")
|
||||
}
|
||||
s.clearSizeMeasurements()
|
||||
|
||||
currentEpoch := curTs.Height()
|
||||
boundaryEpoch := currentEpoch - CompactionBoundary
|
||||
@ -598,7 +609,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
}
|
||||
|
||||
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold)
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error marking: %w", err)
|
||||
}
|
||||
@ -638,7 +648,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
defer purgew.Close() //nolint:errcheck
|
||||
|
||||
// some stats for logging
|
||||
var hotCnt, coldCnt, purgeCnt int
|
||||
var hotCnt, coldCnt, purgeCnt int64
|
||||
err = s.hot.ForEachKey(func(c cid.Cid) error {
|
||||
// was it marked?
|
||||
mark, err := markSet.Has(c)
|
||||
@ -689,9 +699,10 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
|
||||
log.Infow("cold collection done", "took", time.Since(startCollect))
|
||||
|
||||
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt)
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
|
||||
log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt)
|
||||
s.szKeys = hotCnt * cidKeySize
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(hotCnt))
|
||||
stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(coldCnt))
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
@ -773,8 +784,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return xerrors.Errorf("error purging cold objects: %w", err)
|
||||
}
|
||||
log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
|
||||
|
||||
s.endCriticalSection()
|
||||
log.Infow("critical section done", "total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs)
|
||||
|
||||
if err := checkpoint.Close(); err != nil {
|
||||
log.Warnf("error closing checkpoint: %s", err)
|
||||
@ -788,10 +799,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
if err := os.Remove(s.coldSetPath()); err != nil {
|
||||
log.Warnf("error removing coldset: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.discardSetPath()); err != nil {
|
||||
log.Warnf("error removing discardset: %s", err)
|
||||
}
|
||||
|
||||
// we are done; do some housekeeping
|
||||
s.endTxnProtect()
|
||||
s.gcHotstore()
|
||||
s.gcHotAfterCompaction()
|
||||
|
||||
err = s.setBaseEpoch(boundaryEpoch)
|
||||
if err != nil {
|
||||
@ -904,6 +918,11 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
copy(toWalk, ts.Cids())
|
||||
walkCnt := new(int64)
|
||||
scanCnt := new(int64)
|
||||
szWalk := new(int64)
|
||||
|
||||
tsRef := func(blkCids []cid.Cid) (cid.Cid, error) {
|
||||
return types.NewTipSetKey(blkCids...).Cid()
|
||||
}
|
||||
|
||||
stopWalk := func(_ cid.Cid) error { return errStopWalk }
|
||||
|
||||
@ -926,49 +945,73 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
err = s.view(c, func(data []byte) error {
|
||||
return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err)
|
||||
}
|
||||
|
||||
// tipset CID references are retained
|
||||
pRef, err := tsRef(hdr.Parents)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error computing cid reference to parent tipset")
|
||||
}
|
||||
sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking parent tipset cid reference")
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
|
||||
// message are retained if within the inclMsgs boundary
|
||||
if hdr.Height >= inclMsgs && hdr.Height > 0 {
|
||||
if inclMsgs < inclState {
|
||||
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
|
||||
// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
|
||||
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil {
|
||||
sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
|
||||
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
|
||||
sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
} else {
|
||||
if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil {
|
||||
sz, err = s.walkObject(hdr.Messages, visitor, fHot)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
|
||||
if err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil {
|
||||
sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
}
|
||||
}
|
||||
|
||||
// messages and receipts outside of inclMsgs are included in the cold store
|
||||
if hdr.Height < inclMsgs && hdr.Height > 0 {
|
||||
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil {
|
||||
sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil {
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
}
|
||||
|
||||
// state is only retained if within the inclState boundary, with the exception of genesis
|
||||
if hdr.Height >= inclState || hdr.Height == 0 {
|
||||
if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil {
|
||||
sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
atomic.AddInt64(scanCnt, 1)
|
||||
}
|
||||
|
||||
@ -981,6 +1024,17 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
return nil
|
||||
}
|
||||
|
||||
// retain ref to chain head
|
||||
hRef, err := tsRef(ts.Cids())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error computing cid reference to parent tipset")
|
||||
}
|
||||
sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking parent tipset cid reference")
|
||||
}
|
||||
atomic.AddInt64(szWalk, sz)
|
||||
|
||||
for len(toWalk) > 0 {
|
||||
// walking can take a while, so check this with every opportunity
|
||||
if err := s.checkClosing(); err != nil {
|
||||
@ -1023,123 +1077,129 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
}
|
||||
}
|
||||
|
||||
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)
|
||||
|
||||
log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk)
|
||||
s.szWalk = atomic.LoadInt64(szWalk)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) error {
|
||||
func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int64, error) {
|
||||
var sz int64
|
||||
visit, err := visitor.Visit(c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error visiting object: %w", err)
|
||||
return 0, xerrors.Errorf("error visiting object: %w", err)
|
||||
}
|
||||
|
||||
if !visit {
|
||||
return nil
|
||||
return sz, nil
|
||||
}
|
||||
|
||||
if err := f(c); err != nil {
|
||||
if err == errStopWalk {
|
||||
return nil
|
||||
return sz, nil
|
||||
}
|
||||
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if c.Prefix().Codec != cid.DagCBOR {
|
||||
return nil
|
||||
return sz, nil
|
||||
}
|
||||
|
||||
// check this before recursing
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var links []cid.Cid
|
||||
err = s.view(c, func(data []byte) error {
|
||||
sz += int64(len(data))
|
||||
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
|
||||
links = append(links, c)
|
||||
})
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
|
||||
return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
|
||||
}
|
||||
|
||||
for _, c := range links {
|
||||
err := s.walkObject(c, visitor, f)
|
||||
szLink, err := s.walkObject(c, visitor, f)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
|
||||
return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
|
||||
}
|
||||
sz += szLink
|
||||
}
|
||||
|
||||
return nil
|
||||
return sz, nil
|
||||
}

// like walkObject, but the object may be potentially incomplete (references missing)
func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error {
func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) {
var sz int64
visit, err := visitor.Visit(c)
if err != nil {
return xerrors.Errorf("error visiting object: %w", err)
return 0, xerrors.Errorf("error visiting object: %w", err)
}

if !visit {
return nil
return sz, nil
}

// occurs check -- only for DAGs
if c.Prefix().Codec == cid.DagCBOR {
has, err := s.has(c)
if err != nil {
return xerrors.Errorf("error occur checking %s: %w", c, err)
return 0, xerrors.Errorf("error occur checking %s: %w", c, err)
}

if !has {
err = missing(c)
if err == errStopWalk {
return nil
return sz, nil
}

return err
return 0, err
}
}

if err := f(c); err != nil {
if err == errStopWalk {
return nil
return sz, nil
}

return err
return 0, err
}

if c.Prefix().Codec != cid.DagCBOR {
return nil
return sz, nil
}

// check this before recursing
if err := s.checkClosing(); err != nil {
return err
return sz, err
}

var links []cid.Cid
err = s.view(c, func(data []byte) error {
sz += int64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c)
})
})

if err != nil {
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
}

for _, c := range links {
err := s.walkObjectIncomplete(c, visitor, f, missing)
szLink, err := s.walkObjectIncomplete(c, visitor, f, missing)
if err != nil {
return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
}
sz += szLink
}

return nil
return sz, nil
}

// internal version used during compaction and related operations
@ -1405,8 +1465,9 @@ func (s *SplitStore) completeCompaction() error {
}
s.compactType = none

// Note: at this point we can start the splitstore; a compaction should run on
// the first head change, which will trigger gc on the hotstore.
// Note: at this point we can start the splitstore; base epoch is not
// incremented here so a compaction should run on the first head
// change, which will trigger gc on the hotstore.
// We don't mind the second (back-to-back) compaction as the head will
// have advanced during marking and coldset accumulation.
return nil
@ -1464,6 +1525,13 @@ func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint,
return nil
}

func (s *SplitStore) clearSizeMeasurements() {
s.szKeys = 0
s.szMarkedLiveRefs = 0
s.szProtectedTxns = 0
s.szWalk = 0
}

// I really don't like having this code, but we seem to have some occasional DAG references with
// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared
// after a little bit.
@ -1504,7 +1572,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
missing = make(map[cid.Cid]struct{})

for c := range towalk {
err := s.walkObjectIncomplete(c, visitor,
_, err := s.walkObjectIncomplete(c, visitor,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
@ -4,9 +4,9 @@ import (
"context"
"errors"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"

bstore "github.com/filecoin-project/lotus/blockstore"
)
@ -77,6 +77,10 @@ func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error
return size, err
}

func (es *exposedSplitStore) Flush(ctx context.Context) error {
return es.s.Flush(ctx)
}

func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error {
return es.s.Put(ctx, blk)
}
@ -7,15 +7,61 @@ import (
bstore "github.com/filecoin-project/lotus/blockstore"
)

func (s *SplitStore) gcHotstore() {
const (
// Fraction of garbage in badger vlog for online GC traversal to collect garbage
AggressiveOnlineGCThreshold = 0.0001
)

func (s *SplitStore) gcHotAfterCompaction() {
// Measure hotstore size, determine if we should do full GC, determine if we can do full GC.
// We should do full GC if
// FullGCFrequency is specified and compaction index matches frequency
// OR HotstoreMaxSpaceTarget is specified and total moving space is within 150 GB of target
// We can do full if
// HotstoreMaxSpaceTarget is not specified
// OR total moving space would not exceed 50 GB below target
//
// a) If we should not do full GC => online GC
// b) If we should do full GC and can => moving GC
// c) If we should do full GC and can't => aggressive online GC
getSize := func() int64 {
sizer, ok := s.hot.(bstore.BlockstoreSize)
if ok {
size, err := sizer.Size()
if err != nil {
log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err)
return 0
}
return size
}
log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not")
return 0
}
hotSize := getSize()

copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk
shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceThreshold)
shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0
shouldDoFull := shouldTarget || shouldFreq
canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceSafetyBuffer)
log.Debugw("approximating new hot store size", "key size", s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk)
log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull)

var opts []bstore.BlockstoreGCOption
if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 {
if shouldDoFull && canDoFull {
opts = append(opts, bstore.WithFullGC(true))
} else if shouldDoFull && !canDoFull {
log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, s.cfg.HotstoreMaxSpaceSafetyBuffer, s.cfg.HotstoreMaxSpaceTarget)
log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`")
log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`")

opts = append(opts, bstore.WithThreshold(AggressiveOnlineGCThreshold))
}

if err := s.gcBlockstore(s.hot, opts); err != nil {
log.Warnf("error garbage collecting hostore: %s", err)
}
log.Infof("measured hot store size after GC: %d", getSize())
}
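To make the branch structure above easier to follow, here is a self-contained sketch (not from the diff) that distills the should/can full-GC decision into plain arithmetic; the function, parameter names, and numbers are illustrative assumptions only.

package main

import "fmt"

// decideGC mirrors the decision in gcHotAfterCompaction: two booleans computed
// from config targets and measured sizes select online, moving, or aggressive GC.
func decideGC(hotSize, copySize, target, threshold, safety, compactionIndex, fullGCFreq int64) string {
	shouldTarget := target > 0 && hotSize+copySize > target-threshold
	shouldFreq := fullGCFreq > 0 && compactionIndex%fullGCFreq == 0
	shouldDoFull := shouldTarget || shouldFreq
	canDoFull := target == 0 || hotSize+copySize < target-safety

	switch {
	case shouldDoFull && canDoFull:
		return "moving (full) GC"
	case shouldDoFull && !canDoFull:
		return "aggressive online GC"
	default:
		return "online GC"
	}
}

func main() {
	const GiB = int64(1) << 30
	// e.g. 400 GiB hotstore, ~80 GiB expected copy, 500 GiB target, 150 GiB threshold, 50 GiB safety buffer
	fmt.Println(decideGC(400*GiB, 80*GiB, 500*GiB, 150*GiB, 50*GiB, 3, 0))
}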

func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error {
@ -23,7 +69,7 @@ func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreG
log.Info("garbage collecting blockstore")
startGC := time.Now()

if err := gc.CollectGarbage(opts...); err != nil {
if err := gc.CollectGarbage(s.ctx, opts...); err != nil {
return err
}

@ -33,3 +79,19 @@ func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreG

return fmt.Errorf("blockstore doesn't support garbage collection: %T", b)
}

func (s *SplitStore) gcBlockstoreOnce(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error {
if gc, ok := b.(bstore.BlockstoreGCOnce); ok {
log.Debug("gc blockstore once")
startGC := time.Now()

if err := gc.GCOnce(s.ctx, opts...); err != nil {
return err
}

log.Debugw("gc blockstore once done", "took", time.Since(startGC))
return nil
}

return fmt.Errorf("blockstore doesn't support gc once: %T", b)
}
@ -47,6 +47,23 @@ var (
PruneThreshold = 7 * build.Finality
)

// GCHotstore runs online GC on the chain state in the hotstore according the to options specified
func (s *SplitStore) GCHotStore(opts api.HotGCOpts) error {
if opts.Moving {
gcOpts := []bstore.BlockstoreGCOption{bstore.WithFullGC(true)}
return s.gcBlockstore(s.hot, gcOpts)
}

gcOpts := []bstore.BlockstoreGCOption{bstore.WithThreshold(opts.Threshold)}
var err error
if opts.Periodic {
err = s.gcBlockstore(s.hot, gcOpts)
} else {
err = s.gcBlockstoreOnce(s.hot, gcOpts)
}
return err
}
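A hedged usage sketch (not part of the diff) for the new entry point, using only the HotGCOpts fields exercised above; ss stands in for an initialized *SplitStore.

// One-off online GC of vlog files with at least 1% garbage; set Moving for a full moving GC.
opts := api.HotGCOpts{
	Threshold: 0.01,
	Periodic:  false,
	Moving:    false,
}
if err := ss.GCHotStore(opts); err != nil {
	log.Warnf("hotstore GC failed: %s", err)
}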

// PruneChain instructs the SplitStore to prune chain state in the coldstore, according to the
// options specified.
func (s *SplitStore) PruneChain(opts api.PruneOpts) error {
@ -329,9 +346,9 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
}

s.pruneIndex++
err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.compactionIndex))
err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.pruneIndex))
if err != nil {
return xerrors.Errorf("error saving compaction index: %w", err)
return xerrors.Errorf("error saving prune index: %w", err)
}

return nil
@ -5,8 +5,8 @@ import (
"runtime"
"sync/atomic"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"
)

@ -101,7 +101,7 @@ func (s *SplitStore) doReify(c cid.Cid) {
defer s.txnLk.RUnlock()

count := 0
err := s.walkObjectIncomplete(c, newTmpVisitor(),
_, err := s.walkObjectIncomplete(c, newTmpVisitor(),
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
@ -11,11 +11,11 @@ import (
"testing"
"time"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
mh "github.com/multiformats/go-multihash"

@ -757,6 +757,8 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
return nil
}

func (b *mockStore) Flush(context.Context) error { return nil }

func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, errors.New("not implemented")
}
@ -5,9 +5,9 @@ import (
"sync/atomic"
"time"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-state-types/abi"
@ -4,8 +4,8 @@ import (
"context"
"sync"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
)

// NewMemorySync returns a thread-safe in-memory blockstore.
@ -20,6 +20,8 @@ type SyncBlockstore struct {
bs MemBlockstore // specifically use a memStore to save indirection overhead.
}

func (*SyncBlockstore) Flush(context.Context) error { return nil }

func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
m.mu.Lock()
defer m.mu.Unlock()
@ -6,9 +6,9 @@ import (
"sync"
"time"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/raulk/clock"
"go.uber.org/multierr"
)
@ -93,6 +93,16 @@ func (t *TimedCacheBlockstore) rotate() {
t.mu.Unlock()
}

func (t *TimedCacheBlockstore) Flush(ctx context.Context) error {
t.mu.Lock()
defer t.mu.Unlock()

if err := t.active.Flush(ctx); err != nil {
return err
}
return t.inactive.Flush(ctx)
}

func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error {
// Don't check the inactive set here. We want to keep this block for at
// least one interval.
@ -6,8 +6,8 @@ import (
"testing"
"time"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/raulk/clock"
"github.com/stretchr/testify/require"
)
@ -3,9 +3,9 @@ package blockstore
import (
"context"

blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
blocks "github.com/ipfs/go-libipfs/blocks"
)

type unionBlockstore []Blockstore
@ -55,6 +55,15 @@ func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, er
return size, err
}

func (m unionBlockstore) Flush(ctx context.Context) (err error) {
for _, bs := range m {
if err = bs.Flush(ctx); err != nil {
break
}
}
return err
}

func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) {
for _, bs := range m {
if err = bs.Put(ctx, block); err != nil {
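The Flush methods added across these wrappers (exposed splitstore, mock store, sync, timed cache, union) all implement the same one-method extension of the blockstore interface; a minimal sketch of that shape, with an assumed interface name:

// Assumed name for illustration: the single method every blockstore wrapper above now satisfies.
type Flusher interface {
	Flush(ctx context.Context) error // flush any buffered writes to durable storage
}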
@ -5,7 +5,7 @@ import (
"context"
"testing"

blocks "github.com/ipfs/go-block-format"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/stretchr/testify/require"
)
@ -52,4 +52,4 @@ popd

echo "Generating metadata..."

make -C ../../ bundle-gen
make -C ../../ VERSION="$VERSION" RELEASE="$RELEASE" RELEASE_OVERRIDES="${RELEASE_OVERRIDES[*]}" bundle-gen
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKeDMuJbouvypr1nL2qRruhNVXzv4QiLsZRh6gnvLkc7p
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWSsACNHLGoJbPqeitNY7tom19Nxq8x5ag36eTwmgcAeLo
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWCa1wgMMBB9JjA2kYqaN1v5uh7xvcsc2gQJBHzPp7G57H
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWD6fCvo1dyci6wsjTLyv7eJK73pCVz6RCQjbtPvbc8LYw
@ -95,10 +95,11 @@ func loadManifests(netw string) error {
}

type BuiltinActorsMetadata struct {
Network string
Version actorstypes.Version
ManifestCid cid.Cid
Actors map[string]cid.Cid
Network string
Version actorstypes.Version
ManifestCid cid.Cid
Actors map[string]cid.Cid
BundleGitTag string
}

// ReadEmbeddedBuiltinActorsMetadata reads the metadata from the embedded built-in actor bundles.
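Read together, the struct after this hunk gains one field; the annotations below are added here for clarity and are not in the source.

type BuiltinActorsMetadata struct {
	Network      string              // e.g. "mainnet", "butterflynet", "devnet"
	Version      actorstypes.Version // actors bundle version (8, 9, 10, ...)
	ManifestCid  cid.Cid             // CID of the bundle manifest
	Actors       map[string]cid.Cid  // per-actor code CIDs keyed by actor name
	BundleGitTag string              // new: git tag of the builtin-actors release the bundle is built from
}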
@ -7,25 +7,32 @@ import (
|
||||
)
|
||||
|
||||
var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||
Network: "butterflynet",
|
||||
Version: 8,
|
||||
ManifestCid: MustParseCid("bafy2bzacedvaarfyh6q3bk4dyzux46ednlace2ckxp5nbyn6mb3da2apqn6sk"),
|
||||
Network: "butterflynet",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzaceba5qgs4z3imhlxwds5vamahngatvuuglbv5yl3ftfiosj6ud5chs"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceavzeu4gqte7o33vr4htiaapiwpfq6p26tgdkqla2baqhmiqswfso"),
|
||||
"cron": MustParseCid("bafk2bzacech35onpqxep4yox36k7sr4mj4bch54s3i4b3yyaustrbo5xwfbfs"),
|
||||
"init": MustParseCid("bafk2bzaceahxin3sf5f6ude5j6we4yeqlg66s5qe4tu7lwp26jcg7yp2ns6hi"),
|
||||
"multisig": MustParseCid("bafk2bzacectfmzjtniypgl4whm42sws5aupihqgfikwsr7p5yoq3bmqaogldi"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecbwu54ce5mjgp2pqxyj6kpn2vlgiu5wv2lj2byjiegxnn3infd5i"),
|
||||
"reward": MustParseCid("bafk2bzacecskkbhe6c4ud5jt62wg4w7j7shj6xdwoyic74s5y6pgywxxvnw72"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebycxcwwm7hwhuhpasaskil2kxaqb7tins7azdvvm72rorlciuysi"),
|
||||
"storageminer": MustParseCid("bafk2bzacecgx3etor5m6lahpmjdwqnryutqe6naiurfhgsju72rd4nqssutbg"),
|
||||
"storagepower": MustParseCid("bafk2bzaceayvy6xyp5cwtngm457c5hssvihidppgq3o7gy3dlmhgor3yzujoc"),
|
||||
"system": MustParseCid("bafk2bzacec6xctjxybp7r3kkhase56o6jsaiua7ure5ttu2xfuojt4jhlsoa6"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec2hcqlqcfacylfcrhhliwkisvh4y3adwt47xkf2gdvodwu6ccepc"),
|
||||
"account": MustParseCid("bafk2bzacebd5zetyjtragjwrv2nqktct6u2pmsi4eifbanovxohx3a7lszjxi"),
|
||||
"cron": MustParseCid("bafk2bzacecrszortqkc7har77ssgajglymv6ftrqvmdko5h2yqqh5k2qospl2"),
|
||||
"datacap": MustParseCid("bafk2bzacecapjnxnyw4talwqv5ajbtbkzmzqiosztj5cb3sortyp73ndjl76e"),
|
||||
"eam": MustParseCid("bafk2bzacecflry2dyjqj6fhpovkbcbei377zabectznuxsf6bxggsve7bsxga"),
|
||||
"ethaccount": MustParseCid("bafk2bzacedl4pmkfxkzoqajs6im3ranmopozsmxjcxsnk3kwvd3vv7mfwwrf4"),
|
||||
"evm": MustParseCid("bafk2bzacebgzvmvwv7rsnnhp3zhqbiqkumvyrc7pazfovpptgpgtqkalrli74"),
|
||||
"init": MustParseCid("bafk2bzacecbxp66q3ytjkg37nyv4rmzezbfaigvx4i5yhvqbm5gg4amjeaias"),
|
||||
"multisig": MustParseCid("bafk2bzacecjltag3mn75dsnmrmopjow27buxqhabissowayqlmavrcfetqswc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacednzxg263eqbl2imwz3uhujov63tjkffieyl4hl3dhrgxyhwep6hc"),
|
||||
"placeholder": MustParseCid("bafk2bzaceaamp2a35vpfml4skap4dffklzae2urcm34mtwwce2lvhaons3a5y"),
|
||||
"reward": MustParseCid("bafk2bzacectp23cxsbbdrr3uggnw7f263qll5wkkfzqhn5yq37ae2ehdjdzri"),
|
||||
"storagemarket": MustParseCid("bafk2bzacea45ko3ezkpeujsniovncwnizc4wsxd7kyckskhs7gvzwthzb2mqe"),
|
||||
"storageminer": MustParseCid("bafk2bzaced74qthwrl3gahcf7o3vrdrodbcqhlplh6fykbgy5sd2iyouhq44c"),
|
||||
"storagepower": MustParseCid("bafk2bzaceduksv6wqthr5fgp7mx5prv6gzul2oozf3svrjbuggc4bgokdxgfy"),
|
||||
"system": MustParseCid("bafk2bzacebe6j2ius6clbbr7dypsg54jzmn5xablzunph7ebedw6yhwla4cj2"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebu4joy25gneu2qv3qfm3ktakzalndjrbhekeqrqk3zhotv6nyy2g"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 9,
|
||||
Network: "butterflynet",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacec35by4erhcdgcsgzp7yb3j57utydlxxfc73m3k5pep67ehvvyv6i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceajsdln7v4chxqoukiw7lxw6aexg5qdsaex2hgelz2sbu24iblhzg"),
|
||||
@ -42,29 +49,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecjkesz766626ab4svnzpq3jfs26a75vfktlfaku5fjdao2eyiqyq"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaceciz4ytt5gnn6gc4epez7v6xeg6efkgbvwfxkoa34o2gj3hp5f7zc"),
|
||||
Network: "butterflynet",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzaceckjhsggacixv2d377zfdcnuio4hzkveprio3xnhm3gohi3zy3zco"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedavorwsriewoddjlaganjpsk3o7zfts2wyid3clv5xnctacg37j2"),
|
||||
"cron": MustParseCid("bafk2bzacebtauucwaewxuzgxfpjtmn6xt3kya4om4ugyprlkhhkde76h7fkqg"),
|
||||
"datacap": MustParseCid("bafk2bzacebzdjapqwasq6woxkgq2nm2nre3v7cl2754xwiuo2cfhvsceq4cba"),
|
||||
"eam": MustParseCid("bafk2bzacecmr4zdbpfnemvgo446qby7x4y4v5cbfespt3f6ousv2hxnflyrlk"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacebuewexvig54cuvsvwn4k4zr36tm2q5fel4ezq4v7363n2lmn362k"),
|
||||
"init": MustParseCid("bafk2bzacebww5gsctsk5hack2alkt4kh55bmpb4ywzbyyhoaskryymjj3snj6"),
|
||||
"multisig": MustParseCid("bafk2bzacec5k4wxvou34pyjd5kcsrbsfnlk4k753kkscg3ron2r7tsxollfsq"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebzdeaxglaqpmegalakmxr6secjd24mu5llo4ctoy7pvom5upyuvs"),
|
||||
"reward": MustParseCid("bafk2bzaceb4hyabxnyrrsno5erqqwk5ynnjibblzfcaq3aotlz3ek4uu6dyla"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedpocbf2lg2x2jg6arw2argnwmvo2hyjqvpkrgfu4khz5mtlzxz2o"),
|
||||
"storageminer": MustParseCid("bafk2bzaceacrumah7jdfc62bmvemob4lsh5yiohwodest2cgxakgnn24cenlk"),
|
||||
"storagepower": MustParseCid("bafk2bzaceaxz6n5nywermfptnz6dc53vqsa42lic4rf66l4irm3mqfj4ak5ps"),
|
||||
"system": MustParseCid("bafk2bzaceb4w5bblgyu25ylytpmfrixjsk2ra6emd44j4mv42xfxbwnqloyzi"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedbz2koeb6teewobcjdpgfv7qdae7utgoka6wzlkf6gronnis2nn2"),
|
||||
"account": MustParseCid("bafk2bzacedkt3uzgugcsdrcsyfvizcpyr5eshltmienbyhjne2t7t3ktkihny"),
|
||||
"cron": MustParseCid("bafk2bzacecrehknegmfnhmhwy2g43cw52mvl7ptfpp44syus4iph7az7uveuq"),
|
||||
"datacap": MustParseCid("bafk2bzaced4krgbpj4sywcc453l3pygqr4qocc6nxylhztsm4duvkgfwd7vws"),
|
||||
"eam": MustParseCid("bafk2bzacebn5lyg5pfhjpdlf3r7lnah4x33bhp5afftdgbr4kbpuioytr4bhe"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceaxyu24a2tbiacfr4p367xjtptrbang4qrh3fx65cojyrzolwyi4u"),
|
||||
"evm": MustParseCid("bafk2bzacea5bqaubqeuqmpguxrem2pgocjr43wcfi5e3jpw2e3b4o6tcvs746"),
|
||||
"init": MustParseCid("bafk2bzaceaufptkdg2gc4eq4ijqxtqp7wxwifusxb6kxay3vdz3wr5epqjbho"),
|
||||
"multisig": MustParseCid("bafk2bzacedp3c26ccw3l7fci4xhedxhqeqevkubuf5okuslq7o7rcqwqfahci"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedlmiqvbutz4ebx2mezy3pqj72x2yt4gwea7sf4dv4a4s7xidelok"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacecrzxiowkhzpgz4rl2pdldzwmmnctuq5zzntqjkgyhyfllo3afb5s"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebh2q3ofolirt5q2jpx367dfv22aecevsmybba3yhnxfs3foe6c5q"),
|
||||
"storageminer": MustParseCid("bafk2bzaceavop4j7iwneew6h7p667gvx37baloxilxetwkhsrr26jme6yye5o"),
|
||||
"storagepower": MustParseCid("bafk2bzacecfblbat4w7jkxx7kjst33lowyb7s6apdnl7fsnpmy5c3jfq5kvye"),
|
||||
"system": MustParseCid("bafk2bzacebojf25kc5yo7gskdbdgg5f52oppej2jp6nknzlvrww4ue5vkddd2"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceavue3zekq4wmvttck2vgxlcensrsgh5niu5qhna2owejycorftcc"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 8,
|
||||
Network: "calibrationnet",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedrdn6z3z7xz7lx4wll3tlgktirhllzqxb766dxpaqp3ukxsjfsba"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecruossn66xqbeutqx5r4k2kjzgd43frmwd4qkw6haez44ubvvpxo"),
|
||||
@ -80,8 +90,9 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceaihibfu625lbtzdp3tcftscshrmbgghgrc7kzqhxn4455pycpdkm"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 9,
|
||||
Network: "calibrationnet",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedbedgynklc4dgpyxippkxmba2mgtw7ecntoneclsvvl4klqwuyyy"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceavfgpiw6whqigmskk74z4blm22nwjfnzxb4unlqz2e4wg3c5ujpw"),
|
||||
@ -98,85 +109,101 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebh7dj6j7yi5vadh7lgqjtq42qi2uq4n6zy2g5vjeathacwn2tscu"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaced7wbd43lvgc55xb37mkoo4ppev6ig4jj4j7dtswtjfjq4u5qmpck"),
|
||||
Network: "calibrationnet",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0-rc.1",
|
||||
ManifestCid: MustParseCid("bafy2bzaced25ta3j6ygs34roprilbtb3f6mxifyfnm7z7ndquaruxzdq3y7lo"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecq4owv5begvryvpsy4atfb2jnf7g7o4hxovtdb5a4jfkzacownli"),
|
||||
"cron": MustParseCid("bafk2bzaced4uz5w5h5wksx4end27lphd4qc4kh7q336uyt46lba5ddynwftya"),
|
||||
"datacap": MustParseCid("bafk2bzacedoc7y4s5n3p2zo4bcmafcrellkakn2e3uyf5wb3mtbuqhvwqn2l4"),
|
||||
"eam": MustParseCid("bafk2bzacealpqjgz5qmucm3v6z6hn36igx7zijixhqrxwoj3g4bdgvyml3adi"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacedmlmyy2efbt4qk5ighawiychklhzc6pzyiwvpijwvxoq3xyxlgxw"),
|
||||
"init": MustParseCid("bafk2bzaceaqcfmfylwdemq5bdcelydpf6iqfct4p7b2zwtmqyhuxn522yvic2"),
|
||||
"multisig": MustParseCid("bafk2bzacebuh55hkbkobmmoaoduruss5nsh6e2gtqtdbqsmw6e7k5vg6heyrm"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedcpzw7prdoxnaclcvmtwr6yf54zi4bzzwe5w3xknh72ji6p3qfc6"),
|
||||
"reward": MustParseCid("bafk2bzaced74ym6j424zzbr6millasfcyl3r4zm5fnauasrwn3ti6fdarbkym"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec7delr2q42yj4wu3daa5xjz4zezeivphtx3xwyvpgwpdnfoevhh2"),
|
||||
"storageminer": MustParseCid("bafk2bzaced7isnew5lhu237pdtwaqmbv65qqvfmmnve2c5yfobtfqw2fptuvc"),
|
||||
"storagepower": MustParseCid("bafk2bzacebe5frk6gcgzcvzkxavhhbs3id3iyacybn7y7gxwzgl5t6zawzswg"),
|
||||
"system": MustParseCid("bafk2bzacectivaezqijucle5s2f7xeui5uxig7bnk7fe4vsvz3xu7agjtb2ge"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceczgwckte4exultjxyzgzoo6m6r5coyphnlappi4clethhhybslxc"),
|
||||
"account": MustParseCid("bafk2bzacebhfuz3sv7duvk653544xsxhdn4lsmy7ol7k6gdgancyctvmd7lnq"),
|
||||
"cron": MustParseCid("bafk2bzacecw2yjb6ysieffa7lk7xd32b3n4ssowvafolt7eq52lp6lk4lkhji"),
|
||||
"datacap": MustParseCid("bafk2bzaceaot6tv6p4cat3cg5fknq22htosw3p5rwyijmdsraatwqyc4qyero"),
|
||||
"eam": MustParseCid("bafk2bzacec5untyj6cefdsfm47wckozw6wt6svqqh5dzh63nu4f6dvf26fkco"),
|
||||
"ethaccount": MustParseCid("bafk2bzacebiyrhz32xwxi6xql67aaq5nrzeelzas472kuwjqmdmgwotpkj35e"),
|
||||
"evm": MustParseCid("bafk2bzaceblpgzid4qjfavuiht6uwvq2lznshklk2qmf5akm3dzx2fczdqdxc"),
|
||||
"init": MustParseCid("bafk2bzacedhxbcglnonzruxf2jpczara73eh735wf2kznatx2u4gsuhgqwffq"),
|
||||
"multisig": MustParseCid("bafk2bzacebv5gdlte2pyovmz6s37me6x2rixaa6a33w6lgqdohmycl23snvwm"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacea7ngq44gedftjlar3j3ql3dmd7e7xkkb6squgxinfncybfmppmlc"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacea3yo22x4dsh4axioshrdp42eoeugef3tqtmtwz5untyvth7uc73o"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecclsfboql3iraf3e66pzuh3h7qp3vgmfurqz26qh5g5nrexjgknc"),
|
||||
"storageminer": MustParseCid("bafk2bzacedu4chbl36rilas45py4vhqtuj6o7aa5stlvnwef3kshgwcsmha6y"),
|
||||
"storagepower": MustParseCid("bafk2bzacedu3c67spbf2dmwo77ymkjel6i2o5gpzyksgu2iuwu2xvcnxgfdjg"),
|
||||
"system": MustParseCid("bafk2bzacea4mtukm5zazygkdbgdf26cpnwwif5n2no7s6tknpxlwy6fpq3mug"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec67wuchq64k7kgrujguukjvdlsl24pgighqdx5vgjhyk6bycrwnc"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 8,
|
||||
ManifestCid: MustParseCid("bafy2bzacecsmunz6fzhg53276cixadn6ybhcnzkgbw3la5hf342tfxsdoet26"),
|
||||
Network: "caterpillarnet",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacebsdvrxmdajiyxq2mxxxppvg2zwvqjzz3pgbsxwh6pvdcjofpmnxw"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaced6yatl4y2nmqmx2h3btk3np6oelyw2yt57elsb2nfmm33fadzt2g"),
|
||||
"cron": MustParseCid("bafk2bzacebrujytq4u7g62jbz52gio5k2s6rhruty7nt4eqq7ygitzxuee5zi"),
|
||||
"init": MustParseCid("bafk2bzacedajw5ptnwfdidv6m4rvd4c2m7dve4lhfbawygl5idkalcxbiiudu"),
|
||||
"multisig": MustParseCid("bafk2bzaceb3kh5hjh6eebb5236xp7crn2owyyo7irap6sy4ns76uc7om6pxuy"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedl5am53e4mtxpzligcycxvmkolfkhfiuavww2dq3ukgaqwowj7vw"),
|
||||
"reward": MustParseCid("bafk2bzacecbswf242j43cymj3wh7nszawwlofv6z6z4qipb5d32hpxdhxywng"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceca5ersmg3zxf2cztgktq33bmfjuiqjcjlktwj52xyrpujbdsqvek"),
|
||||
"storageminer": MustParseCid("bafk2bzacedg2fqaq5udfp3h6cxhywm27dgagxtselfgkyyyunqq362eaxpdm4"),
|
||||
"storagepower": MustParseCid("bafk2bzaceb3dm2i2q323e6iozo3r6pyded645vvlpf537kga2a3hu5x7abgl4"),
|
||||
"system": MustParseCid("bafk2bzacebu47th3xerlngqavlipb6cfu2utljkxxzgadc3totogto2tmx2jc"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceci3niq3rmbcmepgn27zvlgci6d5t4dvthx3pbmmx3wcu5elova6i"),
|
||||
"account": MustParseCid("bafk2bzacedfms6w3ghqtljpgsfuiqa6ztjx7kcuin6myjezj6rypj3zjbqms6"),
|
||||
"cron": MustParseCid("bafk2bzaceaganmlpozvy4jywigs46pfrtdmhjjey6uyhpurplqbasojsislba"),
|
||||
"datacap": MustParseCid("bafk2bzacebafqqe3wv5ytkfwmqzbmchgem66pw6yq6rl7w6vlhqsbkxnisswq"),
|
||||
"eam": MustParseCid("bafk2bzaceaeayeksiivw4y3gdqtigbgfntyvwc3q7v2ivb5kx7u55pn4q5lt6"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceburkmtd63nmzxpux5rcxsbqr6x5didl2ce7al32g4tqrvo4pjz2i"),
|
||||
"evm": MustParseCid("bafk2bzacea7tp4lop7ivhay3ozitkmxxurk74v4zse42ant47rh2uw5z3tq5e"),
|
||||
"init": MustParseCid("bafk2bzaced23r54kwuebl7t6mdantbby5qpfduxwxfryeliof2enyqzhokix6"),
|
||||
"multisig": MustParseCid("bafk2bzacebcn3rib6j6jvclys7dkf62hco45ssgamczkrtzt6xyewd6gt3mtu"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecvas4leo44pqdguj22nnwqoqdgwajzrpm5d6ltkehc37ni6p6doq"),
|
||||
"placeholder": MustParseCid("bafk2bzaceaamp2a35vpfml4skap4dffklzae2urcm34mtwwce2lvhaons3a5y"),
|
||||
"reward": MustParseCid("bafk2bzacebiizh4ohvv6p4uxjusoygex4wxcgvudqmdl2fsh6ft6s2zt4tz6q"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedhkidshm7w2sqlw7izvaieyhkvmyhfsem6t6qfnkh7dnwqe56po2"),
|
||||
"storageminer": MustParseCid("bafk2bzacedcmsibwfwhkp3sabmbyjmhqibyhjf3wwst7u5bkb2k6xpun3xevg"),
|
||||
"storagepower": MustParseCid("bafk2bzacecrgnpypxnxzgglhlitaallfee3dl4ejy3y63knl7llnwba4ycf7i"),
|
||||
"system": MustParseCid("bafk2bzacecl7gizbe52xj6sfm5glubkhrdblmzuwlid6lxrwr5zhcmv4dl2ew"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebzndvdqtdck2y35smcxezldgh6nm6rbkj3g3fmiknsgg2uah235y"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacedo6tmei6rzjaaddh2yffe5xgr6w4smnadofjhomc3saiv3ubplqe"),
|
||||
Network: "caterpillarnet",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacebsdvrxmdajiyxq2mxxxppvg2zwvqjzz3pgbsxwh6pvdcjofpmnxw"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebb32htqlwcwiotyvtbeehfmluu2ubjnepo57gelelwitudrstwba"),
|
||||
"cron": MustParseCid("bafk2bzaceatvkww7soy4a6onu6xhe7pzkdzkqw46ywuu56yv3ncl76xpotzqu"),
|
||||
"datacap": MustParseCid("bafk2bzaced57nk7i7w6qmbosy4gd6atme6yppesdgjllou6nppbti5yw6glcg"),
|
||||
"init": MustParseCid("bafk2bzacedtoputbtz573ytg4yo5wbbg7fbhrzplux4uknxrb2jarifcuxxou"),
|
||||
"multisig": MustParseCid("bafk2bzacec22z3xz45mbwgtliwkj7ngc43bervnt557c6dqsg6aesatpd5isy"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedym7xnaxr2igfq72rttj2adqyqqfxk3j4qovp2bcwqk5paoe4t7e"),
|
||||
"reward": MustParseCid("bafk2bzacedemsmbmbtk5toprmm6jivjq3wkxumavc65vpvm6ngspgjfkth7z6"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecb53mmklf4rbv263dvufqj3nsf7mi6zk2tjlgwmzbr633kw3ds3w"),
|
||||
"storageminer": MustParseCid("bafk2bzacea3wljpn2ixgnd4lovr6yckiwd652ytcrz5amgj47lg6drjhgggqa"),
|
||||
"storagepower": MustParseCid("bafk2bzaceakvohgvovpeldb6hjfg7readxo37a5h4qauis4nz6pte7mcll6c2"),
|
||||
"system": MustParseCid("bafk2bzacecisuqj2ln7ep72xaejvs2lrgh2logc7retxxpd3qvobymwyz7bxo"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebyjosiripwqyf56yhjfs5hg26mch7totsqth4rgpt5j32hqg6ric"),
|
||||
"account": MustParseCid("bafk2bzacedfms6w3ghqtljpgsfuiqa6ztjx7kcuin6myjezj6rypj3zjbqms6"),
|
||||
"cron": MustParseCid("bafk2bzaceaganmlpozvy4jywigs46pfrtdmhjjey6uyhpurplqbasojsislba"),
|
||||
"datacap": MustParseCid("bafk2bzacebafqqe3wv5ytkfwmqzbmchgem66pw6yq6rl7w6vlhqsbkxnisswq"),
|
||||
"eam": MustParseCid("bafk2bzaceaeayeksiivw4y3gdqtigbgfntyvwc3q7v2ivb5kx7u55pn4q5lt6"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceburkmtd63nmzxpux5rcxsbqr6x5didl2ce7al32g4tqrvo4pjz2i"),
|
||||
"evm": MustParseCid("bafk2bzacea7tp4lop7ivhay3ozitkmxxurk74v4zse42ant47rh2uw5z3tq5e"),
|
||||
"init": MustParseCid("bafk2bzaced23r54kwuebl7t6mdantbby5qpfduxwxfryeliof2enyqzhokix6"),
|
||||
"multisig": MustParseCid("bafk2bzacebcn3rib6j6jvclys7dkf62hco45ssgamczkrtzt6xyewd6gt3mtu"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecvas4leo44pqdguj22nnwqoqdgwajzrpm5d6ltkehc37ni6p6doq"),
|
||||
"placeholder": MustParseCid("bafk2bzaceaamp2a35vpfml4skap4dffklzae2urcm34mtwwce2lvhaons3a5y"),
|
||||
"reward": MustParseCid("bafk2bzacebiizh4ohvv6p4uxjusoygex4wxcgvudqmdl2fsh6ft6s2zt4tz6q"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedhkidshm7w2sqlw7izvaieyhkvmyhfsem6t6qfnkh7dnwqe56po2"),
|
||||
"storageminer": MustParseCid("bafk2bzacedcmsibwfwhkp3sabmbyjmhqibyhjf3wwst7u5bkb2k6xpun3xevg"),
|
||||
"storagepower": MustParseCid("bafk2bzacecrgnpypxnxzgglhlitaallfee3dl4ejy3y63knl7llnwba4ycf7i"),
|
||||
"system": MustParseCid("bafk2bzacecl7gizbe52xj6sfm5glubkhrdblmzuwlid6lxrwr5zhcmv4dl2ew"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebzndvdqtdck2y35smcxezldgh6nm6rbkj3g3fmiknsgg2uah235y"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacea5csj2os7h76a6yvf6shgpwkysawijxemk5uvvzejxrwjo6ir4yg"),
|
||||
Network: "caterpillarnet",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzaceajftd7jawqnwf4kzkotksrwy6ag7mu2apkvypzrrmxboheuum5oi"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacea7tpruyxdgyz4xa7curiphwdw4abmspft3ee24puruazdcl3tq5c"),
|
||||
"cron": MustParseCid("bafk2bzacebc6kkj7kzsicm5baszjgd37b4b3kijsffqmmkhhjlyd7zhkwfcqm"),
|
||||
"datacap": MustParseCid("bafk2bzaceddcmwl6po2jd3tfkkgv4zvub7i47gsx33pkqdspqhgvhe4npc4as"),
|
||||
"eam": MustParseCid("bafk2bzaceccsvcww2rmqnh4plkq6oapqaeqbhydrtup54z4dwunolz5tpgtb4"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacea5sig3zpxfkqppoj3t344cvuhzvkx6ge2isgdzc34rfpng2ogdje"),
|
||||
"init": MustParseCid("bafk2bzacedtby353aho7itoyoj7w6moydmigjm3sgy6djgnfxqehlpae4vcc2"),
|
||||
"multisig": MustParseCid("bafk2bzacedyguvwz5zfveqoqicn3j6lkdzipf247nhvdi6dvmahulr7nzgox6"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceavaatmmnsz3v3ksopcbu6jx4iq7u7nnmqbclsiabsfkfu3zfpmka"),
|
||||
"reward": MustParseCid("bafk2bzacecrphs4avteik4yejsqwkpy5bcqramdhnzykbfq3uu2qalj2p26ti"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceajby2jb5m3fenzarum374zxdzuyrpkspfljwovu7c3hvyceqd5sa"),
|
||||
"storageminer": MustParseCid("bafk2bzacebqtn7jdvk756ighri5ajro6gjepnef3c6rxupbbgkth62zytiy5s"),
|
||||
"storagepower": MustParseCid("bafk2bzacedwlo32brlalpovfkkk7qwo3ou2kpgv2bf7fioy5srn7uejmn7n46"),
|
||||
"system": MustParseCid("bafk2bzacebbt63h26x5vw5fdo2pmdb4q65u3t6lilkugvmjar6zfsc7ethxsi"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecr5kbyypdxnxlepzk5sji2k72t454vto5ok4owfcuwfpeyivjtu4"),
|
||||
"account": MustParseCid("bafk2bzacecsbx4tovnr5x2ifcpqbpx33oht74mgtvmaauzrqcq2wnm7prr7ak"),
|
||||
"cron": MustParseCid("bafk2bzacecpzfajba6m4v4ty342jw6lcu6n63bwtldmzko733wpd2q5jzfdvu"),
|
||||
"datacap": MustParseCid("bafk2bzaceaa5zplkxvguwvnecfen62buhli5rraa3ga74b33a3sbscanzx4ok"),
|
||||
"eam": MustParseCid("bafk2bzaceaffoa3eqmj7h53lwjatfqrjw63l3czk3vthyjz6oyhgwka3xwp6g"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceb7suh5m4xagoq6ap5v5x7vrhex2coq6gu6d54jteblm36cxhk5b2"),
|
||||
"evm": MustParseCid("bafk2bzaceccmwmnb42pn7y7skbjwjur7b2eqxuw4lvm3he2xpvudjzluss4os"),
|
||||
"init": MustParseCid("bafk2bzaceai72h4hxbgbp6gwm3m24uujscrj4bmbh6pxoerqtduijxt6dchfq"),
|
||||
"multisig": MustParseCid("bafk2bzacebycdokda2gysqpnl3dwksgidujgsksf4n6qotjq4erj5zd7clkzy"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceb5ucvftftiim6cxjusdpsmbht4x33kgexxgv5447gevk47h7jjqk"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzaceajqygfkhamlzfsquqjgoy4p7pc2fruouqajapfucf22rbmtt5yf6"),
|
||||
"storagemarket": MustParseCid("bafk2bzacednmzko2o5iv5kc6qxvpqfx5rq72krxzvna6cqoqem6flbfukglby"),
|
||||
"storageminer": MustParseCid("bafk2bzacedayzz5qw7t7ykycf3a2hp666j5hb23a3mnmgp4xbbpvrx3h3ags4"),
|
||||
"storagepower": MustParseCid("bafk2bzacedd3eiejzp35xuwjf3cvgd43b5ukqhelqmtgzqzqnt2wcy56pb744"),
|
||||
"system": MustParseCid("bafk2bzacecfivztuulqqv4o5oyvvvrkblwix4hqt24pqru6ivnpioefhuhria"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecdhw6x7dfrxfysmn6tdbn2ny464omgqppxhjuawxauscidppd7pc"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 8,
|
||||
Network: "devnet",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedq7tuibavyqxzkq4uybjj7ly22eu42mjkoehwn5d47xfunmtjm4k"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacea4tlgnp7m6tlldpz3termlwxlnyq24nwd4zdzv4r6nsjuaktuuzc"),
|
||||
@ -192,8 +219,9 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceaajgtglewgitshgdi2nzrvq7eihjtyqj5yiamesqun2hujl3xev2"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 9,
|
||||
Network: "devnet",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedozk3jh2j4nobqotkbofodq4chbrabioxbfrygpldgoxs3zwgggk"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaced5llqnqqhypolyuogz3h2wjomugqkrhyhocvly3aoib4c5xiush6"),
|
||||
@ -210,29 +238,55 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacednorhcy446agy7ecpmfms2u4aoa3mj2eqomffuoerbik5yavrxyi"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacea73thrlpfejrswlcu5uhe7rcgdewvmrcwoef6jzngsba3i4v5ibi"),
|
||||
Network: "devnet",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacebzz376j5kizfck56366kdz5aut6ktqrvqbi3efa2d4l2o2m653ts"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceau2o55aripm7kqrbzzog72zcduv5psnxzpohx5rdkykepc4z7aag"),
|
||||
"cron": MustParseCid("bafk2bzacec5qc5xluwikf4lolfa4oe356iwep25tiezbxfdyg5jib54rhlh6q"),
|
||||
"datacap": MustParseCid("bafk2bzacebo47u6q3xou5exsecjpa4rpfqjfm7vyhz4qlr3nk7p46trsk4occ"),
|
||||
"eam": MustParseCid("bafk2bzacea6yeptevserd7ayf4ahokor4sdpizpxpbqwkuvvhzdkon672shsm"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacebi46zgjili4luu3nqy6mno5k4skvo4cvs7genhkdfaukhtw7xirw"),
|
||||
"init": MustParseCid("bafk2bzacedvf2bij6jovem2dfzkz347yvmydxj7vlgaiagosz5t3c5jyy43zu"),
|
||||
"multisig": MustParseCid("bafk2bzacecukolwx6y5pcajnxg2aawiubgxo5zyj24a23zg5t4qu3k4qbofh4"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecwyih7nodrwsw5vyl5zk7fapklje76jpowqjr6x6br2bm55smqqy"),
|
||||
"reward": MustParseCid("bafk2bzacea6vfrcprxg2i4l5qnigf4c6pyvnjxpzfqr4pmph3elif7sfidrei"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceahradb3od4ahs46x6yriwvm36iabgtohhoiolubsumto5eravzbu"),
|
||||
"storageminer": MustParseCid("bafk2bzacedekivqgvqapbepvzn6jte3xyymyg5yjuwy42xvboa6rcqnzgo74u"),
|
||||
"storagepower": MustParseCid("bafk2bzacedkmiosllqqqarmr53twspyswdvsm7givwczgo3qqsxzpad4hzjma"),
|
||||
"system": MustParseCid("bafk2bzaceagdymtxb4lxqqjgmnphbgdtdgveuuqaouswpzagj4bpbon3ptop4"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec556wsqldm22k2abshvvnsrawlm3bbqkwzht6ubcj76m2jsy3azi"),
|
||||
"account": MustParseCid("bafk2bzacedkj5dqs5xxamnlug2d5dyjl6askf7wlmvwzhmsrzcvogv7acqfe6"),
|
||||
"cron": MustParseCid("bafk2bzaceabslrigld2vshng6sppbp3bsptjtttvbxctwqe5lkyl2efom2wu4"),
|
||||
"datacap": MustParseCid("bafk2bzaceagg4qklzhhg5oj4shwqpoeykeyxus7xhj2abuot2tycdwsf2oaaa"),
|
||||
"eam": MustParseCid("bafk2bzaceafttsbglcetxwtzqtdniittwczogkefgnxztgsp7mymcpvdlhdik"),
|
||||
"ethaccount": MustParseCid("bafk2bzacedypn6tf3yrj4bavmscddygeima3puih37fbkxuhjhlrzbjh3dbo4"),
|
||||
"evm": MustParseCid("bafk2bzacec5ywczgg73fnwi36nlxso3zduop3fwj3pq6ynn5zltrs4dpcwglg"),
|
||||
"init": MustParseCid("bafk2bzacebkanlbkwwtniyz4fawevnkoyje67l5nflltmciplqiutekxzzfh4"),
|
||||
"multisig": MustParseCid("bafk2bzacectxa2izvpaybmmpvearekrybxtglctwnexzzneyn6xrnrmectmpa"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacectov7vawkhsvq7aobyjq3oppamytq425wpkxejmq65vvcdm4bt2e"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacec3xpbrxw2rnpuve4mxfhny44lxbpbwmduy4ula4ohj2bp6wplpvc"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec5nexsejraoqraywka7zcacjoxgpdbopehdkhiwqwcyghtof4s3w"),
|
||||
"storageminer": MustParseCid("bafk2bzacecw5xzj6z5b7qxx5xca5py4aoecmqj2pxb6nw673alufy22zckkyo"),
|
||||
"storagepower": MustParseCid("bafk2bzaceckhnpxoaanjf474wxzkntlnzdofoy75ehyuydfjkuw4swhotws4y"),
|
||||
"system": MustParseCid("bafk2bzaceairk5qz5hyzt4yyaxa356aszyifswiust5ilxizwxujcmtzvjzoa"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaced2mkyqobpgna5jevosym3adv2bvraggigyz2jgn5cxymirxj4x3i"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 8,
|
||||
Network: "hyperspace",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedvffumcvf72f2btjqvece3kpcdorxq5tq76iwcmqbzvsiu526cqm"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecim7uybic2qprbkjhowg7qkniv4zywj5h5g4u4ss72urco2akzuo"),
|
||||
"cron": MustParseCid("bafk2bzaceahgq64awp4f7li3hdgimc4upqvdvltpmeywckvens33umcxt424a"),
|
||||
"datacap": MustParseCid("bafk2bzacebkxn52ttooaslkwncijk3bgd3tm2zw7vijdhwvg2cxnxbrzmmq5e"),
|
||||
"eam": MustParseCid("bafk2bzaceczhgub5anrnaf7ol65mu54gsgwcj6c6m3yhet7rhxm2l6kz4s4ru"),
|
||||
"ethaccount": MustParseCid("bafk2bzacealn5enbxyxbfs7gbsjbyma2zk3bcr7okvflxhpr753d4eh6ixooa"),
|
||||
"evm": MustParseCid("bafk2bzacedljkrmazyewawpnddrkzrt55556374dw2pm2hokgkompgzw4vx5y"),
|
||||
"init": MustParseCid("bafk2bzacec55gyyaqjrw7zughywocgwcjvv6k5fijjpjw4xgckuqz6pjtff5a"),
|
||||
"multisig": MustParseCid("bafk2bzaceblozbdzybdivvjdiid4jwm2jc6x5a66sunh2vvwsqba6wzqmr7i6"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacealcyke5a6n24efs6qe4iikynpk2twqssyugy7jcyf6p6shgw2iwa"),
|
||||
"placeholder": MustParseCid("bafk2bzaceaamp2a35vpfml4skap4dffklzae2urcm34mtwwce2lvhaons3a5y"),
|
||||
"reward": MustParseCid("bafk2bzacebafzaqhwsm3nmsfwcd6ngvx6ev6zlcpyfljqh4kb77vok6opban6"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecrjfg4p7fxznsdkoobs4po2ve3ywixrirrk6netgxh63qqaefamg"),
|
||||
"storageminer": MustParseCid("bafk2bzaceb3ctd4atxwhdkmlg4i63zxo5aopknlj7l5kaiqr22xpcmico6vg4"),
|
||||
"storagepower": MustParseCid("bafk2bzacecvcix3ugopvby2vah5wwiu5cqjedwzwkanmr34kdoc4f3o6p7nsq"),
|
||||
"system": MustParseCid("bafk2bzacedo2hfopt6gy52goj7fot5qwzhtnysmgo7h25crq4clpugkerjabk"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacea7rfkjrixaidksnmjehglmavyt56nyeu3sfxu2e3dcpf62oab6tw"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacebogjbpiemi7npzxchgcjjki3tfxon4ims55obfyfleqntteljsea"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedudbf7fc5va57t3tmo63snmt3en4iaidv4vo3qlyacbxaa6hlx6y"),
|
||||
@ -248,8 +302,9 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceb3zbkjz3auizmoln2unmxep7dyfcmsre64vnqfhdyh7rkqfoxlw4"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 9,
|
||||
Network: "mainnet",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzaceb6j6666h36xnhksu3ww4kxb6e25niayfgkdnifaqi6m6ooc66i6i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacect2p7urje3pylrrrjy3tngn6yaih4gtzauuatf2jllk3ksgfiw2y"),
|
||||
@ -266,29 +321,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecf3yodlyudzukumehbuabgqljyhjt5ifiv4vetcfohnvsxzynwga"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaceduyggnyqhlr346hfw32tbobzrvhzhill33zhe7jw64pmwjci2xoc"),
|
||||
Network: "mainnet",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacecsuyf7mmvrhkx2evng5gnz5canlnz2fdlzu2lvcgptiq2pzuovos"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedmr3wxl7qmhquageorrt3aavbzqfpm7eymxidakwuhaobu7dseqs"),
|
||||
"cron": MustParseCid("bafk2bzaceblekxapm5nnqnxmw3mk27236iyutvbhhpsc3fyde7zi7guccn7cc"),
|
||||
"datacap": MustParseCid("bafk2bzacedu4jevyvqsilq7bq4uhegbkm75muwebc5ifqpfaojwhexf2j4i6a"),
|
||||
"eam": MustParseCid("bafk2bzacedc7224twbolvdq6iwc7ybdpah2ywe3ueo33jv67ecimndinle374"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzaceaggldo6wmkvp5innv4pnjv4xnpedspzofvma3dhu7vk45hh5djoq"),
|
||||
"init": MustParseCid("bafk2bzacedutlaebaczkdi4vqvt3xim24u3whleqk2r4lufjd5jnmxcosea6q"),
|
||||
"multisig": MustParseCid("bafk2bzaceatiqxjwtugpzus3s52zoggnrftxqn7kiw3obvjgkjvtd6zr3636q"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebyviac6i43gtsvmjfg6mzcp6rwgz44axidc7m432btbmvt7i2m2g"),
|
||||
"reward": MustParseCid("bafk2bzacecbcnlvk2izojpfoaksitqenhzaofn6ynxx5pegl4y45wjlouexdi"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebobteeoz2jycplgtydfyltzughegz2sopn6pzy2udjfvuo77joyk"),
|
||||
"storageminer": MustParseCid("bafk2bzacecwcypas3y6u4rya7qolfwmou437xgrjxh7mnnim7bo3nhk4dscxw"),
|
||||
"storagepower": MustParseCid("bafk2bzacec62kids6rcrdmdeqhwiz3s5rs35s5gn25ilwemgmm6jqnr2rnaaq"),
|
||||
"system": MustParseCid("bafk2bzacecj3c4bjbs2xfttn7zqle7yocqh47u2s7hwuxrsn7fi5h74tcyxoc"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedgf7zbnlste5ukzueduemkimiit64scz7lvebztufx5jxtx6gkz2"),
|
||||
"account": MustParseCid("bafk2bzaceampw4romta75hyz5p4cqriypmpbgnkxncgxgqn6zptv5lsp2w2bo"),
|
||||
"cron": MustParseCid("bafk2bzacedcbtsifegiu432m5tysjzkxkmoczxscb6hqpmrr6img7xzdbbs2g"),
|
||||
"datacap": MustParseCid("bafk2bzacealj5uk7wixhvk7l5tnredtelralwnceafqq34nb2lbylhtuyo64u"),
|
||||
"eam": MustParseCid("bafk2bzacedrpm5gbleh4xkyo2jvs7p5g6f34soa6dpv7ashcdgy676snsum6g"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceaqoc5zakbhjxn3jljc4lxnthllzunhdor7sxhwgmskvc6drqc3fa"),
|
||||
"evm": MustParseCid("bafk2bzaceahmzdxhqsm7cu2mexusjp6frm7r4kdesvti3etv5evfqboos2j4g"),
|
||||
"init": MustParseCid("bafk2bzaced2f5rhir3hbpqbz5ght7ohv2kgj42g5ykxrypuo2opxsup3ykwl6"),
|
||||
"multisig": MustParseCid("bafk2bzaceduf3hayh63jnl4z2knxv7cnrdenoubni22fxersc4octlwpxpmy4"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceartlg4mrbwgzcwric6mtvyawpbgx2xclo2vj27nna57nxynf3pgc"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacebnhtaejfjtzymyfmbdrfmo7vgj3zsof6zlucbmkhrvcuotw5dxpq"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceclejwjtpu2dhw3qbx6ow7b4pmhwa7ocrbbiqwp36sq5yeg6jz2bc"),
|
||||
"storageminer": MustParseCid("bafk2bzaced4h7noksockro7glnssz2jnmo2rpzd7dvnmfs4p24zx3h6gtx47s"),
|
||||
"storagepower": MustParseCid("bafk2bzacec4ay4crzo73ypmh7o3fjendhbqrxake46bprabw67fvwjz5q6ixq"),
|
||||
"system": MustParseCid("bafk2bzacedakk5nofebyup4m7nvx6djksfwhnxzrfuq4oyemhpl4lllaikr64"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedfel6edzqpe5oujno7fog4i526go4dtcs6vwrdtbpy2xq6htvcg6"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 8,
|
||||
Network: "testing",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedkjpqx27wgsvfxzuxfvixuxtbpt2y6yo6igcasez6gqiowron776"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebmfbtdj5vruje5auacrhhprcjdd6uclhukb7je7t2f6ozfcgqlu2"),
|
||||
@ -304,8 +362,9 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 9,
|
||||
Network: "testing",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacecnnrmekqw2xvud46g3vo6x26cogh3ydgljqajlxqxzzbuxsjlwjm"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"),
|
||||
@ -322,29 +381,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacearlgbespxi2zdrybtp2rrbwscmtbyou5qa2egbdvcz6v2yjjqvjo"),
|
||||
Network: "testing",
|
||||
Version: 10,
|
||||
BundleGitTag: "v10.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacebsp3bkxwsijenqeimhvhtg52d6o76hn6qhzxveqfq7d5hdd5l2ee"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceba6me5ipkcijuhyypnzjydhv3ebi2ctailar7mtzlk4vk3rbxfee"),
|
||||
"cron": MustParseCid("bafk2bzacea6k2mai2xnakygqvbigivfrvv5q7d34qrzjv2crkqtwbjxnxmkbe"),
|
||||
"datacap": MustParseCid("bafk2bzaceah4oxcgck6bcfkzctm2klpvmltyidq7uxnlkcap6ypi3lnkcvrqk"),
|
||||
"eam": MustParseCid("bafk2bzacedjtkvocrnkrot2oztsfrxtpwl32wwbmbkrjfbbm4xipwzrhhxn5c"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzaced6vhabkr2ojpjzsybrq5yvksjzpjk6yei6fwobkwwydlj5f473pw"),
|
||||
"init": MustParseCid("bafk2bzaceaib3o5e7wop7kwjirgpferqarmngrgjkur2yhdnwplctidpxsgme"),
|
||||
"multisig": MustParseCid("bafk2bzaced4z3awacxumq6yr33a3adu2legb7colahgvqpmigs3fvvjxs3byc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceb6mfi24mpzt7qlkratj2tdtqo7aia67zcztuslrxcjaycz6fnai6"),
|
||||
"reward": MustParseCid("bafk2bzacebngh5kwtem4ncarpjtxhs4rwyoficttkgxlsjtiz5ucdi4p3czoc"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecnsibyil62jfq2gbkoe6c2epehfcrxzjmqjnwz7kxab2hkbu3lks"),
|
||||
"storageminer": MustParseCid("bafk2bzacedzw4vkrt3sdkhagpvn62pknyyjkcrzewncvtvae5qgwe6ulzx4a4"),
|
||||
"storagepower": MustParseCid("bafk2bzacedxgadibot6nzvripqt3z5shvjsoscupinejnsvswq4cbeskblwyy"),
|
||||
"system": MustParseCid("bafk2bzacedm24avrmp5o5odhpad43qeglooflygwh4ah7qnzbij2h4c3v6cge"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceapq3j6ww3ofytwq3pz3obumaqsyg3wrm6tksdh7op23a72co3rya"),
|
||||
"account": MustParseCid("bafk2bzaceazxb6p2xg6caivmie6k2bvutyesngwyvhwv4eemwu7ia4vnqkcuy"),
|
||||
"cron": MustParseCid("bafk2bzaceax6ym73boyl5zdpbcr6zmbajzylmcdvlapz5zcqgzcshakz44jbq"),
|
||||
"datacap": MustParseCid("bafk2bzacea63x3v6lvtb4ast5uq3nhrpokvylymvezyr5xyjl6vtlfwkuw6qo"),
|
||||
"eam": MustParseCid("bafk2bzacebhualcn7fofyqr6lhrel32ud23hcwzeenfqu3rrn5nmt6gugqgo6"),
|
||||
"ethaccount": MustParseCid("bafk2bzacecgft7e3v4kbpb3tlt5s6hng74ptu3ggcdi4wmt5p4vr6qkmkw2zc"),
|
||||
"evm": MustParseCid("bafk2bzaceaoqvbqetgicqpvwvcnpjx5aa74kwlhq3u7mwv4yseszxkimwz5pk"),
|
||||
"init": MustParseCid("bafk2bzaceapmoyg2qppzle24t25ncyycn2uwhnw6crqkqlokkbc7w4mn74wko"),
|
||||
"multisig": MustParseCid("bafk2bzacecn3dlepgaps3h6iwlq65dx6zyrbfi4pmgdqxphb5idubb6ibflwe"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceaanxurr2k3ueolwcnminmdfp3tyxtntqg5fou37smeulb5dxqjzk"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacedujdvwk4omjexdnmh2qrkqbw27v4c2g3krajhtzyfzart36bimum"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecdbjjxvdtltobiu7thwyyr2puunoz3q4vyfnhhxl2sbp4ovwq37s"),
|
||||
"storageminer": MustParseCid("bafk2bzacebo5q7jrf4qjrhtotwt5ouzlygvml4bzofs2egdnbxyfmuo7tro6c"),
|
||||
"storagepower": MustParseCid("bafk2bzacebt2ipqnorxbzncwjadkulip6blzksmwd4mmyrfjsmjyf55itra2k"),
|
||||
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 8,
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 8,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacecd3lb5v6tzjylnhnrhexslssyaozy6hogzgpkhztoe76exbrgrug"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebmfbtdj5vruje5auacrhhprcjdd6uclhukb7je7t2f6ozfcgqlu2"),
|
||||
@ -360,8 +422,9 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 9,
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 9,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacecql2gj2tri4fnbznmldue73qzt6zszvugw4exd64mwb52zrhv7k2"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"),
|
||||
@@ -378,24 +441,26 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"),
},
}, {
Network: "testing-fake-proofs",
Version: 10,
ManifestCid: MustParseCid("bafy2bzacea4irr2oxhclwt4mvtrevbzb7mbqddcebjz7bkqjq6eoflpfhencc"),
Network: "testing-fake-proofs",
Version: 10,
BundleGitTag: "v10.0.0",
ManifestCid: MustParseCid("bafy2bzacedwap2uuii4luljckrnb4vkur2unb6fyinn7xjie6xlva2wmlygj2"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceba6me5ipkcijuhyypnzjydhv3ebi2ctailar7mtzlk4vk3rbxfee"),
"cron": MustParseCid("bafk2bzacea6k2mai2xnakygqvbigivfrvv5q7d34qrzjv2crkqtwbjxnxmkbe"),
"datacap": MustParseCid("bafk2bzaceah4oxcgck6bcfkzctm2klpvmltyidq7uxnlkcap6ypi3lnkcvrqk"),
"eam": MustParseCid("bafk2bzacedjtkvocrnkrot2oztsfrxtpwl32wwbmbkrjfbbm4xipwzrhhxn5c"),
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
"evm": MustParseCid("bafk2bzaced6vhabkr2ojpjzsybrq5yvksjzpjk6yei6fwobkwwydlj5f473pw"),
"init": MustParseCid("bafk2bzaceaib3o5e7wop7kwjirgpferqarmngrgjkur2yhdnwplctidpxsgme"),
"multisig": MustParseCid("bafk2bzaced4z3awacxumq6yr33a3adu2legb7colahgvqpmigs3fvvjxs3byc"),
"paymentchannel": MustParseCid("bafk2bzaceb6mfi24mpzt7qlkratj2tdtqo7aia67zcztuslrxcjaycz6fnai6"),
"reward": MustParseCid("bafk2bzacebngh5kwtem4ncarpjtxhs4rwyoficttkgxlsjtiz5ucdi4p3czoc"),
"storagemarket": MustParseCid("bafk2bzacecnsibyil62jfq2gbkoe6c2epehfcrxzjmqjnwz7kxab2hkbu3lks"),
"storageminer": MustParseCid("bafk2bzaceb4grddnw54gczgcdak5a2gqvwed66mhibbug6qu4jy35bf45jltg"),
"storagepower": MustParseCid("bafk2bzacedp2dnbk4bg3hhaeztre4q3jv7eqs267rlafszpggb2njjn3x5eru"),
"system": MustParseCid("bafk2bzacedm24avrmp5o5odhpad43qeglooflygwh4ah7qnzbij2h4c3v6cge"),
"verifiedregistry": MustParseCid("bafk2bzaceapq3j6ww3ofytwq3pz3obumaqsyg3wrm6tksdh7op23a72co3rya"),
"account": MustParseCid("bafk2bzaceazxb6p2xg6caivmie6k2bvutyesngwyvhwv4eemwu7ia4vnqkcuy"),
"cron": MustParseCid("bafk2bzaceax6ym73boyl5zdpbcr6zmbajzylmcdvlapz5zcqgzcshakz44jbq"),
"datacap": MustParseCid("bafk2bzacea63x3v6lvtb4ast5uq3nhrpokvylymvezyr5xyjl6vtlfwkuw6qo"),
"eam": MustParseCid("bafk2bzacebhualcn7fofyqr6lhrel32ud23hcwzeenfqu3rrn5nmt6gugqgo6"),
"ethaccount": MustParseCid("bafk2bzacecgft7e3v4kbpb3tlt5s6hng74ptu3ggcdi4wmt5p4vr6qkmkw2zc"),
"evm": MustParseCid("bafk2bzaceaoqvbqetgicqpvwvcnpjx5aa74kwlhq3u7mwv4yseszxkimwz5pk"),
"init": MustParseCid("bafk2bzaceapmoyg2qppzle24t25ncyycn2uwhnw6crqkqlokkbc7w4mn74wko"),
"multisig": MustParseCid("bafk2bzacecn3dlepgaps3h6iwlq65dx6zyrbfi4pmgdqxphb5idubb6ibflwe"),
"paymentchannel": MustParseCid("bafk2bzaceaanxurr2k3ueolwcnminmdfp3tyxtntqg5fou37smeulb5dxqjzk"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacedujdvwk4omjexdnmh2qrkqbw27v4c2g3krajhtzyfzart36bimum"),
"storagemarket": MustParseCid("bafk2bzacecdbjjxvdtltobiu7thwyyr2puunoz3q4vyfnhhxl2sbp4ovwq37s"),
"storageminer": MustParseCid("bafk2bzacedc5klueery4fn2voso4u76rgo54uctsculesdbxxbeh6rgp2q4te"),
"storagepower": MustParseCid("bafk2bzacecuz2h2renlfio4xkyrvvro7nwidf7utpjy3oizk2xuszoz3gmea6"),
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
},
}}
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"

actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
@@ -16,7 +17,13 @@ func TestEmbeddedMetadata(t *testing.T) {
metadata, err := build.ReadEmbeddedBuiltinActorsMetadata()
require.NoError(t, err)

require.Equal(t, metadata, build.EmbeddedBuiltinActorsMetadata)
for i, v1 := range metadata {
v2 := build.EmbeddedBuiltinActorsMetadata[i]
require.Equal(t, v1.Network, v2.Network)
require.Equal(t, v1.Version, v2.Version)
require.Equal(t, v1.ManifestCid, v2.ManifestCid)
require.Equal(t, v1.Actors, v2.Actors)
}
}

// Test that we're registering the manifest correctly.
@@ -26,7 +33,7 @@ func TestRegistration(t *testing.T) {
require.True(t, found)
require.True(t, manifestCid.Defined())

for _, key := range actors.GetBuiltinActorsKeys(av) {
for _, key := range manifest.GetBuiltinActorsKeys(av) {
actorCid, found := actors.GetActorCodeID(av, key)
require.True(t, found)
name, version, found := actors.GetActorMetaByCode(actorCid)
@@ -69,10 +69,6 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
},
DrandIncentinet: {
Servers: []string{
"https://dev1.drand.sh",
"https://dev2.drand.sh",
},
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
},
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -22,8 +22,9 @@ const GenesisFile = ""

var NetworkBundle = "devnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = true

const GenesisNetworkVersion = network.Version18
const GenesisNetworkVersion = network.Version17

var UpgradeBreezeHeight = abi.ChainEpoch(-1)

@@ -59,6 +60,8 @@ var UpgradeSkyrHeight = abi.ChainEpoch(-19)

var UpgradeSharkHeight = abi.ChainEpoch(-20)

var UpgradeHyggeHeight = abi.ChainEpoch(30)

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
@@ -111,6 +114,7 @@ func init() {
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
UpgradeSkyrHeight = getUpgradeHeight("LOTUS_SKYR_HEIGHT", UpgradeSkyrHeight)
UpgradeSharkHeight = getUpgradeHeight("LOTUS_SHARK_HEIGHT", UpgradeSharkHeight)
UpgradeHyggeHeight = getUpgradeHeight("LOTUS_HYGGE_HEIGHT", UpgradeHyggeHeight)

BuildType |= Build2k

@@ -131,6 +135,10 @@ const InteractivePoRepConfidence = 6

const BootstrapPeerThreshold = 1

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926

var WhitelistedBlock = cid.Undef

// Reducing the delivery delay for equivocation of
@@ -19,10 +19,11 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}

const GenesisNetworkVersion = network.Version16
const GenesisNetworkVersion = network.Version17

var NetworkBundle = "butterflynet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = false

const BootstrappersFile = "butterflynet.pi"
const GenesisFile = "butterflynet.car"
@@ -49,7 +50,8 @@ const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = -17
const UpgradeOhSnapHeight = -18
const UpgradeSkyrHeight = -19
const UpgradeSharkHeight = abi.ChainEpoch(600)
const UpgradeSharkHeight = abi.ChainEpoch(-20)
const UpgradeHyggeHeight = abi.ChainEpoch(600)

var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg512MiBV1,
@@ -80,4 +82,8 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 3141592

var WhitelistedBlock = cid.Undef
@@ -26,6 +26,7 @@ const GenesisNetworkVersion = network.Version0

var NetworkBundle = "calibrationnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = false

const BootstrappersFile = "calibnet.pi"
const GenesisFile = "calibnet.car"
@@ -69,6 +70,9 @@ const UpgradeSkyrHeight = 510

const UpgradeSharkHeight = 16800 // 6 days after genesis

// 2023-02-21T16:30:00Z
const UpgradeHyggeHeight = 322354

var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -113,4 +117,8 @@ var PropagationDelaySecs = uint64(10)
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 314159

var WhitelistedBlock = cid.Undef
@@ -20,6 +20,7 @@ import (

var NetworkBundle = "caterpillarnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = false

const BootstrappersFile = "interopnet.pi"
const GenesisFile = "interopnet.car"
@@ -49,7 +50,9 @@ var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
var UpgradeSkyrHeight = abi.ChainEpoch(-19)

const UpgradeSharkHeight = abi.ChainEpoch(99999999999999)
const UpgradeSharkHeight = abi.ChainEpoch(-20)

const UpgradeHyggeHeight = abi.ChainEpoch(99999999999999)

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -104,6 +107,7 @@ func init() {
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
UpgradeSkyrHeight = getUpgradeHeight("LOTUS_SKYR_HEIGHT", UpgradeSkyrHeight)
UpgradeSharkHeight = getUpgradeHeight("LOTUS_SHARK_HEIGHT", UpgradeSharkHeight)
UpgradeHyggeHeight = getUpgradeHeight("LOTUS_HYGGE_HEIGHT", UpgradeHyggeHeight)

BuildType |= BuildInteropnet
SetAddressNetwork(address.Testnet)
@@ -118,4 +122,9 @@ const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
// TODO same as butterfly for now, as we didn't submit an assignment for interopnet.
const Eip155ChainId = 3141592

var WhitelistedBlock = cid.Undef
@@ -26,6 +26,9 @@ var NetworkBundle = "mainnet"
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical.
var BundleOverrides map[actorstypes.Version]string

// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical.
const ActorDebugging = false

const GenesisNetworkVersion = network.Version0

const BootstrappersFile = "mainnet.pi"
@@ -57,6 +60,7 @@ const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight = 336458

// 2020-12-22T02:00:00Z
// var because of wdpost_test.go
var UpgradeClausHeight = abi.ChainEpoch(343200)

// 2021-03-04T00:00:30Z
@@ -81,7 +85,10 @@ const UpgradeOhSnapHeight = 1594680
const UpgradeSkyrHeight = 1960320

// 2022-11-30T14:00:00Z
var UpgradeSharkHeight = abi.ChainEpoch(2383680)
const UpgradeSharkHeight = 2383680

// 2023-03-14T15:14:00Z
var UpgradeHyggeHeight = abi.ChainEpoch(2683348)

var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
@@ -96,8 +103,8 @@ func init() {
SetAddressNetwork(address.Mainnet)
}

if os.Getenv("LOTUS_DISABLE_SHARK") == "1" {
UpgradeSharkHeight = math.MaxInt64
if os.Getenv("LOTUS_DISABLE_HYGGE") == "1" {
UpgradeHyggeHeight = math.MaxInt64
}

// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
@@ -125,6 +132,10 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 314

// we skip checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
@@ -42,9 +42,6 @@ var (

AllowableClockDriftSecs = uint64(1)

Finality = policy.ChainFinality
ForkLengthThreshold = Finality

SlashablePowerDelay = 20
InteractivePoRepConfidence = 6

@@ -108,6 +105,7 @@ var (
UpgradeOhSnapHeight abi.ChainEpoch = -17
UpgradeSkyrHeight abi.ChainEpoch = -18
UpgradeSharkHeight abi.ChainEpoch = -19
UpgradeHyggeHeight abi.ChainEpoch = -20

DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -116,6 +114,7 @@ var (
GenesisNetworkVersion = network.Version0
NetworkBundle = "devnet"
BundleOverrides map[actorstypes.Version]string
ActorDebugging = true

NewestNetworkVersion = network.Version16
ActorUpgradeNetworkVersion = network.Version16
@@ -128,4 +127,11 @@ var (
GenesisFile = ""
)

const Finality = policy.ChainFinality
const ForkLengthThreshold = Finality

const BootstrapPeerThreshold = 1

// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.19.1-dev"
const BuildVersion = "1.21.0-dev"

func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -5,6 +5,7 @@ import (
"golang.org/x/xerrors"

actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
@@ -28,7 +29,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
// Actors V7 and lower
switch name {

case AccountKey:
case manifest.AccountKey:
switch av {

case actorstypes.Version0:
@@ -53,7 +54,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.AccountActorCodeID, true
}

case CronKey:
case manifest.CronKey:
switch av {

case actorstypes.Version0:
@@ -78,7 +79,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.CronActorCodeID, true
}

case InitKey:
case manifest.InitKey:
switch av {

case actorstypes.Version0:
@@ -103,7 +104,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.InitActorCodeID, true
}

case MarketKey:
case manifest.MarketKey:
switch av {

case actorstypes.Version0:
@@ -128,7 +129,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.StorageMarketActorCodeID, true
}

case MinerKey:
case manifest.MinerKey:
switch av {

case actorstypes.Version0:
@@ -153,7 +154,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.StorageMinerActorCodeID, true
}

case MultisigKey:
case manifest.MultisigKey:
switch av {

case actorstypes.Version0:
@@ -178,7 +179,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.MultisigActorCodeID, true
}

case PaychKey:
case manifest.PaychKey:
switch av {

case actorstypes.Version0:
@@ -203,7 +204,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.PaymentChannelActorCodeID, true
}

case PowerKey:
case manifest.PowerKey:
switch av {

case actorstypes.Version0:
@@ -228,7 +229,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.StoragePowerActorCodeID, true
}

case RewardKey:
case manifest.RewardKey:
switch av {

case actorstypes.Version0:
@@ -253,7 +254,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.RewardActorCodeID, true
}

case SystemKey:
case manifest.SystemKey:
switch av {

case actorstypes.Version0:
@@ -278,7 +279,7 @@ func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
return builtin7.SystemActorCodeID, true
}

case VerifregKey:
case manifest.VerifregKey:
switch av {

case actorstypes.Version0:
@@ -314,7 +315,7 @@ func GetActorCodeIDs(av actorstypes.Version) (map[string]cid.Cid, error) {
return cids, nil
}

actorsKeys := GetBuiltinActorsKeys(av)
actorsKeys := manifest.GetBuiltinActorsKeys(av)
synthCids := make(map[string]cid.Cid)

for _, key := range actorsKeys {
@@ -8,6 +8,7 @@ import (
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin10 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
@@ -25,7 +26,7 @@ var Methods = builtin10.MethodsAccount

func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
if name != actors.AccountKey {
if name != manifest.AccountKey {
return nil, xerrors.Errorf("actor code is not account: %s", name)
}
@@ -11,6 +11,7 @@ import (

"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-state-types/manifest"

{{range .versions}}
{{if (le . 7)}}
@@ -24,7 +25,7 @@ var Methods = builtin{{.latestVersion}}.MethodsAccount

func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
if name != actors.AccountKey {
if name != manifest.AccountKey {
return nil, xerrors.Errorf("actor code is not account: %s", name)
}
@@ -9,6 +9,7 @@ import (

"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/go-state-types/manifest"

{{if (le .v 7)}}
account{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/account"
@@ -48,7 +49,7 @@ func (s *state{{.v}}) GetState() interface{} {
}

func (s *state{{.v}}) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state{{.v}}) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v0.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account0 "github.com/filecoin-project/specs-actors/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state0) GetState() interface{} {
}

func (s *state0) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state0) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v10.go (generated, 3 changes)
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
account10 "github.com/filecoin-project/go-state-types/builtin/v10/account"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -44,7 +45,7 @@ func (s *state10) GetState() interface{} {
}

func (s *state10) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state10) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v2.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state2) GetState() interface{} {
}

func (s *state2) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state2) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v3.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state3) GetState() interface{} {
}

func (s *state3) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state3) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v4.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state4) GetState() interface{} {
}

func (s *state4) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state4) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v5.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state5) GetState() interface{} {
}

func (s *state5) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state5) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v6.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state6) GetState() interface{} {
}

func (s *state6) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state6) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v7.go (generated, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account"

"github.com/filecoin-project/lotus/chain/actors"
@@ -44,7 +45,7 @@ func (s *state7) GetState() interface{} {
}

func (s *state7) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state7) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v8.go (generated, 3 changes)
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
account8 "github.com/filecoin-project/go-state-types/builtin/v8/account"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -44,7 +45,7 @@ func (s *state8) GetState() interface{} {
}

func (s *state8) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state8) ActorVersion() actorstypes.Version {
chain/actors/builtin/account/v9.go (generated, 3 changes)
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
account9 "github.com/filecoin-project/go-state-types/builtin/v9/account"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@@ -44,7 +45,7 @@ func (s *state9) GetState() interface{} {
}

func (s *state9) ActorKey() string {
return actors.AccountKey
return manifest.AccountKey
}

func (s *state9) ActorVersion() actorstypes.Version {
@@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-state-types/builtin"
smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/proof"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -25,6 +26,7 @@ import (
var SystemActorAddr = builtin.SystemActorAddr
var BurntFundsActorAddr = builtin.BurntFundsActorAddr
var CronActorAddr = builtin.CronActorAddr
var EthereumAddressManagerActorAddr = builtin.EthereumAddressManagerActorAddr
var SaftAddress = makeAddress("t0122")
var ReserveAddress = makeAddress("t090")
var RootVerifierAddress = makeAddress("t080")
@@ -165,7 +167,7 @@ func IsAccountActor(c cid.Cid) bool {
func IsStorageMinerActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == actors.MinerKey
return name == manifest.MinerKey
}

if c == builtin0.StorageMinerActorCodeID {
@@ -202,7 +204,7 @@ func IsStorageMinerActor(c cid.Cid) bool {
func IsMultisigActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == actors.MultisigKey
return name == manifest.MultisigKey
}

if c == builtin0.MultisigActorCodeID {
@@ -273,6 +275,33 @@ func IsPaymentChannelActor(c cid.Cid) bool {
return false
}

func IsPlaceholderActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.PlaceholderKey
}

return false
}

func IsEvmActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.EvmKey
}

return false
}

func IsEthAccountActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.EthAccountKey
}

return false
}

func makeAddress(addr string) address.Address {
ret, err := address.NewFromString(addr)
if err != nil {
Some files were not shown because too many files have changed in this diff.