v1.27.0-a #10

Closed
jonathanface wants to merge 473 commits from v1.27.0-a into master
175 changed files with 11865 additions and 663 deletions
Showing only changes of commit 92b67334ae

View File

@ -1,7 +1,7 @@
version: 2.1
orbs:
aws-cli: circleci/aws-cli@1.3.2
docker: circleci/docker@2.1.4
aws-cli: circleci/aws-cli@4.1.1
docker: circleci/docker@2.3.0
executors:
golang:
@ -70,8 +70,6 @@ commands:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
@ -96,6 +94,7 @@ commands:
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt install curl ca-certificates gnupg
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
@ -143,9 +142,9 @@ jobs:
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
executor:
type: executor
default: golang
resource_class:
type: string
default: medium+
go-test-flags:
type: string
default: "-timeout 20m"
@ -164,7 +163,14 @@ jobs:
type: string
default: unit
description: Test suite name to report to CircleCI.
executor: << parameters.executor >>
docker:
- image: cimg/go:1.20
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
@ -182,6 +188,8 @@ jobs:
command: |
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
dockerize -wait tcp://yugabyte:5433 -timeout 3m
env
gotestsum \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@ -209,7 +217,9 @@ jobs:
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
docker:
- image: cimg/go:1.20
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
@ -396,15 +406,14 @@ jobs:
Run golangci-lint.
working_directory: ~/lotus
parameters:
executor:
type: executor
default: golang
args:
type: string
default: ''
description: |
Arguments to pass to golangci-lint
executor: << parameters.executor >>
docker:
- image: cimg/go:1.20
resource_class: medium+
steps:
- install-ubuntu-deps
- attach_workspace:
@ -575,7 +584,7 @@ workflows:
- build
suite: itest-deals_concurrent
target: "./itests/deals_concurrent_test.go"
executor: golang-2xl
resource_class: 2xlarge
- test:
name: test-itest-deals_invalid_utf8_label
requires:
@ -768,6 +777,18 @@ workflows:
- build
suite: itest-get_messages_in_ts
target: "./itests/get_messages_in_ts_test.go"
- test:
name: test-itest-harmonydb
requires:
- build
suite: itest-harmonydb
target: "./itests/harmonydb_test.go"
- test:
name: test-itest-harmonytask
requires:
- build
suite: itest-harmonytask
target: "./itests/harmonytask_test.go"
- test:
name: test-itest-lite_migration
requires:
@ -970,14 +991,14 @@ workflows:
- build
suite: itest-wdpost_worker_config
target: "./itests/wdpost_worker_config_test.go"
executor: golang-2xl
resource_class: 2xlarge
- test:
name: test-itest-worker
requires:
- build
suite: itest-worker
target: "./itests/worker_test.go"
executor: golang-2xl
resource_class: 2xlarge
- test:
name: test-itest-worker_upgrade
requires:
@ -990,7 +1011,7 @@ workflows:
- build
suite: utest-unit-cli
target: "./cli/... ./cmd/... ./api/..."
executor: golang-2xl
resource_class: 2xlarge
get-params: true
- test:
name: test-unit-node
@ -1004,7 +1025,7 @@ workflows:
- build
suite: utest-unit-rest
target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
executor: golang-2xl
resource_class: 2xlarge
- test:
name: test-unit-storage
requires:

View File

@ -10,11 +10,25 @@ import (
"text/template"
)
var GoVersion = "" // from init below. Ex: 1.19.7
//go:generate go run ./gen.go ..
//go:embed template.yml
var templateFile embed.FS
func init() {
b, err := os.ReadFile("../go.mod")
if err != nil {
panic("cannot find go.mod in parent folder")
}
for _, line := range strings.Split(string(b), "\n") {
if strings.HasPrefix(line, "go ") {
GoVersion = line[3:]
}
}
}
type (
dirs = []string
suite = string
@ -111,6 +125,7 @@ func main() {
Networks []string
ItestFiles []string
UnitSuites map[string]string
GoVersion string
}
in := data{
Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"},
@ -125,6 +140,7 @@ func main() {
}
return ret
}(),
GoVersion: GoVersion,
}
out, err := os.Create("./config.yml")

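Reviewer note: the generated config.yml above now pins cimg/go:1.20, and the GoVersion read from go.mod here is what fills the [[ .GoVersion]] placeholders in the template.yml diff that follows. A minimal sketch of that substitution, assuming gen.go executes the embedded template with text/template and custom [[ ]] delimiters (the delimiter choice is inferred from the placeholders, not shown in this hunk):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Custom delimiters avoid clashing with CircleCI's own << ... >> syntax
	// (assumed; inferred from the [[ .GoVersion]] placeholders in template.yml).
	tmpl := template.Must(template.New("cfg").Delims("[[", "]]").Parse(
		"docker:\n  - image: cimg/go:[[ .GoVersion ]]\n"))
	if err := tmpl.Execute(os.Stdout, struct{ GoVersion string }{"1.20"}); err != nil {
		panic(err)
	}
	// Output:
	// docker:
	//   - image: cimg/go:1.20
}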
View File

@ -1,7 +1,7 @@
version: 2.1
orbs:
aws-cli: circleci/aws-cli@1.3.2
docker: circleci/docker@2.1.4
aws-cli: circleci/aws-cli@4.1.1
docker: circleci/docker@2.3.0
executors:
golang:
@ -70,8 +70,6 @@ commands:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
@ -96,6 +94,7 @@ commands:
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt install curl ca-certificates gnupg
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
@ -143,9 +142,9 @@ jobs:
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
executor:
type: executor
default: golang
resource_class:
type: string
default: medium+
go-test-flags:
type: string
default: "-timeout 20m"
@ -164,7 +163,14 @@ jobs:
type: string
default: unit
description: Test suite name to report to CircleCI.
executor: << parameters.executor >>
docker:
- image: cimg/go:[[ .GoVersion]]
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
@ -182,6 +188,8 @@ jobs:
command: |
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
dockerize -wait tcp://yugabyte:5433 -timeout 3m
env
gotestsum \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@ -209,7 +217,9 @@ jobs:
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
docker:
- image: cimg/go:[[ .GoVersion]]
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
@ -396,15 +406,14 @@ jobs:
Run golangci-lint.
working_directory: ~/lotus
parameters:
executor:
type: executor
default: golang
args:
type: string
default: ''
description: |
Arguments to pass to golangci-lint
executor: << parameters.executor >>
docker:
- image: cimg/go:[[ .GoVersion]]
resource_class: medium+
steps:
- install-ubuntu-deps
- attach_workspace:
@ -543,7 +552,7 @@ workflows:
suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]"
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
executor: golang-2xl
resource_class: 2xlarge
[[- end]]
[[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
get-params: true
@ -561,11 +570,11 @@ workflows:
get-params: true
[[- end -]]
[[- if eq $suite "unit-cli"]]
executor: golang-2xl
resource_class: 2xlarge
get-params: true
[[- end -]]
[[- if eq $suite "unit-rest"]]
executor: golang-2xl
resource_class: 2xlarge
[[- end -]]
[[- end]]
- test:

.gitignore vendored
View File

@ -6,6 +6,7 @@
/lotus-chainwatch
/lotus-shed
/lotus-sim
/lotus-provider
/lotus-townhall
/lotus-fountain
/lotus-stats

View File

@ -28,7 +28,7 @@ The full list of [protocol improvements delivered in the network upgrade can be
## ☢️ Upgrade Warnings ☢️
- Read through the [changelog of the mandatory v1.24.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.24.0). Especially the `Migration` and `v12 Builtin Actor Bundle` sections.
- Please remove and clone a new Lotus repo (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release.
- This feature release requires a minimum Go version of v1.20.7 or higher to successfully build Lotus. Go version 1.21.x is not supported yet.
- EthRPC providers, please check out the [new tracing API to Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100)
@ -190,7 +190,7 @@ account bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa
init bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms
```
## Migration
We are expecting a heavier than normal state migration for this upgrade due to the amount of state changes introduced for miner sector info. (This is a similar migration as the Shark upgrade, however, we have introduced a couple of migration performance optimizations since then for a smoother upgrade experience.)
@ -209,7 +209,7 @@ You can check out the [tutorial for benchmarking the network migration here.](ht
## BREAKING CHANGE
There is a new protocol limit on how many partitions can be submitted in one PoSt - if you have any customized tooling for batching PoSts, please update accordingly.
- feat: limit PoSted partitions to 3 ([filecoin-project/lotus#11327](https://github.com/filecoin-project/lotus/pull/11327))
## New features
@ -221,7 +221,7 @@ There is a new protocol limit on how many partition could be submited in one PoS
## Improvements
- Backport: feat: sealing: Switch to calling PreCommitSectorBatch2 ([filecoin-project/lotus#11215](https://github.com/filecoin-project/lotus/pull/11215))
- updated the bootstrap nodes
## Dependencies
- github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0)
@ -231,9 +231,9 @@ There is a new protocol limit on how many partition could be submited in one PoS
- chore: deps: update libp2p to v0.30.0 #11434
## Snapshots
The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at Chainsafe has launched a brand new lightweight snapshot service that is backed by forest nodes! This is a great alternative service alongside the fil-infra one, and it is compatible with lotus! We recommend lotus users check it out [here](https://docs.filecoin.io/networks/mainnet#resources)!

View File

@ -109,6 +109,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-provider /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
@ -117,11 +118,13 @@ RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN mkdir /var/lib/lotus-provider
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
RUN chown fc: /var/lib/lotus-provider
VOLUME /var/tmp/filecoin-proof-parameters
@ -129,6 +132,7 @@ VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
VOLUME /var/lib/lotus-provider
EXPOSE 1234
EXPOSE 2345

View File

@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps
build-devnets: build lotus-seed lotus-shed
build-devnets: build lotus-seed lotus-shed lotus-provider
.PHONY: build-devnets
debug: GOFLAGS+=-tags=debug
@ -97,6 +97,13 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner
BINS+=lotus-miner
lotus-provider: $(BUILD_DEPS)
rm -f lotus-provider
$(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider
lp2k: GOFLAGS+=-tags=2k
lp2k: lotus-provider
lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
@ -115,13 +122,13 @@ lotus-gateway: $(BUILD_DEPS)
.PHONY: lotus-gateway
BINS+=lotus-gateway
build: lotus lotus-miner lotus-worker
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
.PHONY: build
install: install-daemon install-miner install-worker
install: install-daemon install-miner install-worker install-provider
install-daemon:
install -C ./lotus /usr/local/bin/lotus
@ -129,6 +136,9 @@ install-daemon:
install-miner:
install -C ./lotus-miner /usr/local/bin/lotus-miner
install-provider:
install -C ./lotus-provider /usr/local/bin/lotus-provider
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
@ -144,6 +154,9 @@ uninstall-daemon:
uninstall-miner:
rm -f /usr/local/bin/lotus-miner
uninstall-provider:
rm -f /usr/local/bin/lotus-provider
uninstall-worker:
rm -f /usr/local/bin/lotus-worker
@ -241,6 +254,14 @@ install-miner-service: install-miner install-daemon-service
@echo
@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
install-provider-service: install-provider install-daemon-service
mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service
systemctl daemon-reload
@echo
@echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup."
install-main-services: install-miner-service
install-all-services: install-main-services
@ -259,6 +280,12 @@ clean-miner-service:
rm -f /etc/systemd/system/lotus-miner.service
systemctl daemon-reload
clean-provider-service:
-systemctl stop lotus-provider
-systemctl disable lotus-provider
rm -f /etc/systemd/system/lotus-provider.service
systemctl daemon-reload
clean-main-services: clean-daemon-service
clean-all-services: clean-main-services
@ -294,7 +321,8 @@ actors-code-gen:
$(GOCC) run ./chain/actors/agen
$(GOCC) fmt ./...
actors-gen: actors-code-gen fiximports
actors-gen: actors-code-gen
./scripts/fiximports
.PHONY: actors-gen
bundle-gen:
@ -328,7 +356,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider
docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@ -337,6 +365,8 @@ docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-md-provider: docsgen-md-bin
./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
@ -354,21 +384,23 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
fiximports:
./scripts/fiximports
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci
./scripts/fiximports
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen
jen: gen
snap: lotus lotus-miner lotus-worker
snap: lotus lotus-miner lotus-worker lotus-provider
snapcraft
# snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker
docsgen-cli: lotus lotus-miner lotus-worker lotus-provider
python3 ./scripts/generate-lotus-cli.py
./lotus config default > documentation/en/default-lotus-config.toml
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
./lotus-provider config default > documentation/en/default-lotus-provider-config.toml
.PHONY: docsgen-cli
print-%:

api/api_lp.go Normal file
View File

@ -0,0 +1,10 @@
package api
import "context"
type LotusProvider interface {
Version(context.Context) (Version, error) //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}

View File

@ -15,6 +15,16 @@ import (
"github.com/filecoin-project/lotus/lib/rpcenc"
)
// NewProviderRpc creates a new http jsonrpc client.
func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) {
var res v1api.LotusProviderStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
return &res, closer, err
}
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct

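A hedged sketch of calling the new client constructor end to end; the endpoint address and token below are placeholders, not values defined by this PR:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	header := http.Header{}
	header.Add("Authorization", "Bearer <admin-jwt>") // placeholder admin token

	// Placeholder address; point this at the provider node's RPC endpoint.
	papi, closer, err := client.NewProviderRpc(ctx, "ws://127.0.0.1:12300/rpc/v0", header)
	if err != nil {
		panic(err)
	}
	defer closer()

	v, err := papi.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("lotus-provider API version:", v)
}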
View File

@ -428,6 +428,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
i = &api.GatewayStruct{}
t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
case "Provider":
i = &api.LotusProviderStruct{}
t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal))
default:
panic("unknown type")
}

View File

@ -41,6 +41,12 @@ func PermissionedWorkerAPI(a Worker) Worker {
return &out
}
func PermissionedAPI[T, P any](a T) *P {
var out P
permissionedProxies(a, &out)
return &out
}
func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct
permissionedProxies(a, &out)

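The new generic PermissionedAPI helper avoids adding another bespoke wrapper for the provider API; a hedged usage sketch (stubProvider is hypothetical, added only to make the wiring compile):

package main

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// stubProvider is a hypothetical implementation used only for illustration.
type stubProvider struct{}

func (stubProvider) Version(context.Context) (api.Version, error) { return api.Version(0), nil }
func (stubProvider) Shutdown(context.Context) error               { return nil }

func main() {
	// Wrap the implementation so each method checks the caller's JWT
	// permissions (carried in the request context) against its perm tag.
	wrapped := api.PermissionedAPI[api.LotusProvider, api.LotusProviderStruct](stubProvider{})
	_ = wrapped
}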
View File

@ -827,6 +827,19 @@ type GatewayMethods struct {
type GatewayStub struct {
}
type LotusProviderStruct struct {
Internal LotusProviderMethods
}
type LotusProviderMethods struct {
Shutdown func(p0 context.Context) error `perm:"admin"`
Version func(p0 context.Context) (Version, error) `perm:"admin"`
}
type LotusProviderStub struct {
}
type NetStruct struct {
Internal NetMethods
}
@ -5188,6 +5201,28 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
return "", ErrNotSupported
}
func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
if s.Internal.Shutdown == nil {
return ErrNotSupported
}
return s.Internal.Shutdown(p0)
}
func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
return ErrNotSupported
}
func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
if s.Internal.Version == nil {
return *new(Version), ErrNotSupported
}
return s.Internal.Version(p0)
}
func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) {
return *new(Version), ErrNotSupported
}
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
if s.Internal.ID == nil {
return *new(peer.ID), ErrNotSupported
@ -7416,6 +7451,7 @@ var _ CommonNet = new(CommonNetStruct)
var _ EthSubscriber = new(EthSubscriberStruct)
var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct)
var _ LotusProvider = new(LotusProviderStruct)
var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct)

View File

@ -12,3 +12,5 @@ type RawFullNodeAPI FullNode
func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a)
}
type LotusProviderStruct = api.LotusProviderStruct

View File

@ -59,6 +59,8 @@ var (
MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 7, 0)
ProviderAPIVersion0 = newVer(1, 0, 0)
)
//nolint:varcheck,deadcode

View File

@ -3,9 +3,9 @@ package splitstore
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/rand"
"sync"
"sync/atomic"
"testing"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.25.1-dev"
const BuildVersion = "1.25.3-dev"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -867,6 +867,24 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
}
}
var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof
func init() {
PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof)
for sealProof, info := range abi.SealProofInfos {
PoStToSealMap[info.WinningPoStProof] = sealProof
PoStToSealMap[info.WindowPoStProof] = sealProof
}
}
func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
sealProof, exists := PoStToSealMap[postProof]
if !exists {
return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof")
}
return sealProof, nil
}
func min(a, b int) int {
if a < b {
return a

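A hedged usage sketch of the new lookup; the import path assumes these helpers live in the actors policy package, which this hunk does not show:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/policy"
)

func main() {
	// Map a Window PoSt proof type back to a seal proof that uses it.
	sp, err := policy.GetSealProofFromPoStProof(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
	if err != nil {
		panic(err)
	}
	fmt.Println("seal proof:", sp)
}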
View File

@ -343,9 +343,26 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
}
}
var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof
func init() {
PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof)
for sealProof, info := range abi.SealProofInfos {
PoStToSealMap[info.WinningPoStProof] = sealProof
PoStToSealMap[info.WindowPoStProof] = sealProof
}
}
func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
sealProof, exists := PoStToSealMap[postProof]
if !exists {
return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof")
}
return sealProof, nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
}

View File

@ -362,7 +362,8 @@ func CreateBlockHeader(ctx context.Context, sm *stmgr.StateManager, pts *types.T
var blsMsgCids, secpkMsgCids []cid.Cid
var blsSigs []crypto.Signature
nv := sm.GetNetworkVersion(ctx, bt.Epoch)
for _, msg := range bt.Messages {
for _, msgTmp := range bt.Messages {
msg := msgTmp
if msg.Signature.Type == crypto.SigTypeBLS {
blsSigs = append(blsSigs, msg.Signature)
blsMessages = append(blsMessages, &msg.Message)

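This is the first of several hunks (also in genesis setup, the state tree, net stat, and the proving commands) that copy the range variable before taking its address. A standalone sketch of the bug being avoided, under the pre-1.22 loop-variable semantics this repo builds with:

package main

import "fmt"

func main() {
	nums := []int{1, 2, 3}

	var bad []*int
	for _, n := range nums {
		bad = append(bad, &n) // pre-Go 1.22: every entry aliases one variable
	}
	fmt.Println(*bad[0], *bad[1], *bad[2]) // 3 3 3

	var good []*int
	for _, nTmp := range nums {
		n := nTmp // fresh copy per iteration, as done in this PR
		good = append(good, &n)
	}
	fmt.Println(*good[0], *good[1], *good[2]) // 1 2 3
}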
View File

@ -306,9 +306,9 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) {
return nil
}
var lengthBufCompactedMessages = []byte{132}
var lengthBufCompactedMessagesCBOR = []byte{132}
func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
@ -316,12 +316,12 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufCompactedMessages); err != nil {
if _, err := cw.Write(lengthBufCompactedMessagesCBOR); err != nil {
return err
}
// t.Bls ([]*types.Message) (slice)
if len(t.Bls) > cbg.MaxLength {
if len(t.Bls) > 150000 {
return xerrors.Errorf("Slice value in field t.Bls was too long")
}
@ -334,7 +334,7 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
}
}
// t.BlsIncludes ([][]uint64) (slice)
// t.BlsIncludes ([]exchange.messageIndices) (slice)
if len(t.BlsIncludes) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.BlsIncludes was too long")
}
@ -343,24 +343,13 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
return err
}
for _, v := range t.BlsIncludes {
if len(v) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field v was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
for _, v := range v {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
return err
}
}
}
// t.Secpk ([]*types.SignedMessage) (slice)
if len(t.Secpk) > cbg.MaxLength {
if len(t.Secpk) > 150000 {
return xerrors.Errorf("Slice value in field t.Secpk was too long")
}
@ -373,7 +362,7 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
}
}
// t.SecpkIncludes ([][]uint64) (slice)
// t.SecpkIncludes ([]exchange.messageIndices) (slice)
if len(t.SecpkIncludes) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long")
}
@ -382,26 +371,15 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
return err
}
for _, v := range t.SecpkIncludes {
if len(v) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field v was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(v))); err != nil {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
for _, v := range v {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
return err
}
}
}
return nil
}
func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
*t = CompactedMessages{}
func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) {
*t = CompactedMessagesCBOR{}
cr := cbg.NewCborReader(r)
@ -430,7 +408,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
return err
}
if extra > cbg.MaxLength {
if extra > 150000 {
return fmt.Errorf("t.Bls: array too large (%d)", extra)
}
@ -471,7 +449,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
}
}
// t.BlsIncludes ([][]uint64) (slice)
// t.BlsIncludes ([]exchange.messageIndices) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
@ -487,7 +465,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
}
if extra > 0 {
t.BlsIncludes = make([][]uint64, extra)
t.BlsIncludes = make([]messageIndices, extra)
}
for i := 0; i < int(extra); i++ {
@ -499,47 +477,13 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
_ = extra
_ = err
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
{
if extra > cbg.MaxLength {
return fmt.Errorf("t.BlsIncludes[i]: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.BlsIncludes[i] = make([]uint64, extra)
}
for j := 0; j < int(extra); j++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.BlsIncludes[i][j] = uint64(extra)
}
if err := t.BlsIncludes[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.BlsIncludes[i]: %w", err)
}
}
}
}
}
@ -550,7 +494,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
return err
}
if extra > cbg.MaxLength {
if extra > 150000 {
return fmt.Errorf("t.Secpk: array too large (%d)", extra)
}
@ -591,7 +535,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
}
}
// t.SecpkIncludes ([][]uint64) (slice)
// t.SecpkIncludes ([]exchange.messageIndices) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
@ -607,7 +551,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
}
if extra > 0 {
t.SecpkIncludes = make([][]uint64, extra)
t.SecpkIncludes = make([]messageIndices, extra)
}
for i := 0; i < int(extra); i++ {
@ -619,47 +563,13 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
_ = extra
_ = err
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
{
if extra > cbg.MaxLength {
return fmt.Errorf("t.SecpkIncludes[i]: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.SecpkIncludes[i] = make([]uint64, extra)
}
for j := 0; j < int(extra); j++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.SecpkIncludes[i][j] = uint64(extra)
}
if err := t.SecpkIncludes[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.SecpkIncludes[i]: %w", err)
}
}
}
}
}

View File

@ -4,6 +4,7 @@ import (
"bufio"
"context"
"fmt"
"io"
"math/rand"
"time"
@ -23,6 +24,10 @@ import (
"github.com/filecoin-project/lotus/lib/peermgr"
)
// Set the max exchange message size to 120MiB. Purely based on gas numbers, we can include ~8MiB of
// messages per block, so I've set this to 120MiB to be _very_ safe.
const maxExchangeMessageSize = (15 * 8) << 20
// client implements exchange.Client, using the libp2p ChainExchange protocol
// as the fetching mechanism.
type client struct {
@ -434,10 +439,11 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque
log.Warnw("CloseWrite err", "error", err)
}
// Read response.
// Read response, limiting the size of the response to maxExchangeMessageSize as we allow a
// lot of messages (10k+) but they'll mostly be quite small.
var res Response
err = cborutil.ReadCborRPC(
bufio.NewReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline)),
bufio.NewReader(io.LimitReader(incrt.New(stream, ReadResMinSpeed, ReadResDeadline), maxExchangeMessageSize)),
&res)
if err != nil {
c.peerTracker.logFailure(peer, build.Clock.Since(connectionStart), req.Length)

View File

@ -154,6 +154,8 @@ type BSTipSet struct {
// FIXME: The logic to decompress this structure should belong
//
// to itself, not to the consumer.
//
// NOTE: Max messages is: BlockMessageLimit (10k) * MaxTipsetSize (15) = 150k
type CompactedMessages struct {
Bls []*types.Message
BlsIncludes [][]uint64

View File

@ -0,0 +1,125 @@
package exchange
import (
"fmt"
"io"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
types "github.com/filecoin-project/lotus/chain/types"
)
// Type used for encoding/decoding compacted messages. This is a custom type as we need custom limits.
// - Max messages is 150,000 as that's 15 times the max block size (in messages). It needs to be
// large enough to cover a full tipset full of full blocks.
type CompactedMessagesCBOR struct {
Bls []*types.Message `cborgen:"maxlen=150000"`
BlsIncludes []messageIndices
Secpk []*types.SignedMessage `cborgen:"maxlen=150000"`
SecpkIncludes []messageIndices
}
// Unmarshal into the "decoding" struct, then copy into the actual struct.
func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
var c CompactedMessagesCBOR
if err := c.UnmarshalCBOR(r); err != nil {
return err
}
t.Bls = c.Bls
t.BlsIncludes = make([][]uint64, len(c.BlsIncludes))
for i, v := range c.BlsIncludes {
t.BlsIncludes[i] = v.v
}
t.Secpk = c.Secpk
t.SecpkIncludes = make([][]uint64, len(c.SecpkIncludes))
for i, v := range c.SecpkIncludes {
t.SecpkIncludes[i] = v.v
}
return nil
}
// Copy into the encoding struct, then marshal.
func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
var c CompactedMessagesCBOR
c.Bls = t.Bls
c.BlsIncludes = make([]messageIndices, len(t.BlsIncludes))
for i, v := range t.BlsIncludes {
c.BlsIncludes[i].v = v
}
c.Secpk = t.Secpk
c.SecpkIncludes = make([]messageIndices, len(t.SecpkIncludes))
for i, v := range t.SecpkIncludes {
c.SecpkIncludes[i].v = v
}
return c.MarshalCBOR(w)
}
// this needs to be a struct or cborgen will peek into it and ignore the Unmarshal/Marshal functions
type messageIndices struct {
v []uint64
}
func (t *messageIndices) UnmarshalCBOR(r io.Reader) (err error) {
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra > uint64(build.BlockMessageLimit) {
return fmt.Errorf("cbor input had wrong number of fields")
}
if extra > 0 {
t.v = make([]uint64, extra)
}
for i := 0; i < int(extra); i++ {
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.v[i] = extra
}
return nil
}
func (t *messageIndices) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if len(t.v) > build.BlockMessageLimit {
return xerrors.Errorf("Slice value in field v was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.v))); err != nil {
return err
}
for _, v := range t.v {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, v); err != nil {
return err
}
}
return nil
}
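A hedged round-trip sketch showing that the exported CompactedMessages surface is unchanged by the new wrapper types (package path assumed to be chain/exchange):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/lotus/chain/exchange"
)

func main() {
	in := exchange.CompactedMessages{
		BlsIncludes:   [][]uint64{{0, 1}},
		SecpkIncludes: [][]uint64{{2}},
	}
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}
	var out exchange.CompactedMessages
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.BlsIncludes, out.SecpkIncludes) // [[0 1]] [[2]]
}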

View File

@ -251,7 +251,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
}
params := &markettypes.PublishStorageDealsParams{}
for _, preseal := range m.Sectors {
for _, presealTmp := range m.Sectors {
preseal := presealTmp
preseal.Deal.VerifiedDeal = true
preseal.Deal.EndEpoch = minerInfos[i].presealExp
p := markettypes.ClientDealProposal{

View File

@ -5,7 +5,6 @@ import (
"math"
"math/rand"
"testing"
"time"
)
func TestBlockProbability(t *testing.T) {
@ -23,7 +22,6 @@ func TestBlockProbability(t *testing.T) {
func TestWinnerProba(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002
rand.Seed(time.Now().UnixNano())
const N = 1000000
winnerProba := noWinnersProb()
sum := 0

View File

@ -438,7 +438,8 @@ func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) {
return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack")
}
for addr, sto := range st.snaps.layers[0].actors {
for addr, stoTmp := range st.snaps.layers[0].actors {
sto := stoTmp
if sto.Delete {
if err := st.root.Delete(abi.AddrKey(addr)); err != nil {
return cid.Undef, err

View File

@ -886,6 +886,35 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know
}
}
incomingParentsTsk := incoming.Parents()
commonParent := false
for _, incomingParent := range incomingParentsTsk.Cids() {
if known.Contains(incomingParent) {
commonParent = true
}
}
if commonParent {
// known contains at least one of incoming's Parents => the common ancestor is known's Parents (incoming's Grandparents)
// in this case, we need to return {incoming.Parents()}
incomingParents, err := syncer.store.LoadTipSet(ctx, incomingParentsTsk)
if err != nil {
// fallback onto the network
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), 1)
if err != nil {
return nil, xerrors.Errorf("failed to fetch incomingParents from the network: %w", err)
}
if len(tips) == 0 {
return nil, xerrors.Errorf("network didn't return any tipsets")
}
incomingParents = tips[0]
}
return []*types.TipSet{incomingParents}, nil
}
// TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? Yes.
// Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare?
tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold))

View File

@ -229,13 +229,25 @@ type EthCall struct {
}
func (c *EthCall) UnmarshalJSON(b []byte) error {
type TempEthCall EthCall
var params TempEthCall
type EthCallRaw EthCall // Avoid a recursive call.
type EthCallDecode struct {
// The field should be "input" by spec, but many clients use "data" so we support
// both, but prefer "input".
Input *EthBytes `json:"input"`
EthCallRaw
}
var params EthCallDecode
if err := json.Unmarshal(b, &params); err != nil {
return err
}
*c = EthCall(params)
// If input is specified, prefer it.
if params.Input != nil {
params.Data = *params.Input
}
*c = EthCall(params.EthCallRaw)
return nil
}

View File

@ -194,11 +194,40 @@ func TestMaskedIDInF4(t *testing.T) {
}
func TestUnmarshalEthCall(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}`
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":"0xFF"}`
var c EthCall
err := c.UnmarshalJSON([]byte(data))
require.Nil(t, err)
require.EqualValues(t, []byte{0xff}, c.Data)
}
func TestUnmarshalEthCallInput(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","input":"0xFF"}`
var c EthCall
err := c.UnmarshalJSON([]byte(data))
require.Nil(t, err)
require.EqualValues(t, []byte{0xff}, c.Data)
}
func TestUnmarshalEthCallInputAndData(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":"0xFE","input":"0xFF"}`
var c EthCall
err := c.UnmarshalJSON([]byte(data))
require.Nil(t, err)
require.EqualValues(t, []byte{0xff}, c.Data)
}
func TestUnmarshalEthCallInputAndDataEmpty(t *testing.T) {
// Even if the input is empty, it should be used when specified.
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":"0xFE","input":""}`
var c EthCall
err := c.UnmarshalJSON([]byte(data))
require.Nil(t, err)
require.EqualValues(t, []byte{}, c.Data)
}
func TestUnmarshalEthBytes(t *testing.T) {

View File

@ -12,6 +12,9 @@ import (
type FIL BigInt
func (f FIL) String() string {
if f.Int == nil {
return "0 FIL"
}
return f.Unitless() + " FIL"
}
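The guard makes the zero value printable; a minimal sketch, assuming this is the types.FIL wrapper around *big.Int:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	var f types.FIL         // zero value: the embedded *big.Int is nil
	fmt.Println(f.String()) // prints "0 FIL" instead of hitting the nil inner Int
}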

View File

@ -2,6 +2,7 @@ package main
import (
"context"
crand "crypto/rand"
"encoding/json"
"fmt"
"math/rand"
@ -145,7 +146,10 @@ func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector {
}
params := make([]byte, 32)
rand.Read(params)
_, err = crand.Read(params)
if err != nil {
panic(err)
}
msg := &types.Message{
To: to,

View File

@ -1,6 +1,7 @@
package cli
import (
"errors"
"fmt"
"io"
"os"
@ -8,7 +9,6 @@ import (
"syscall"
ufcli "github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
type PrintHelpErr struct {
@ -52,7 +52,7 @@ func RunApp(app *ufcli.App) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n\n", err) // nolint:errcheck
}
var phe *PrintHelpErr
if xerrors.As(err, &phe) {
if errors.As(err, &phe) {
_ = ufcli.ShowCommandHelp(phe.Ctx, phe.Ctx.Command.Name)
}
os.Exit(1)

View File

@ -847,7 +847,8 @@ var NetStatCmd = &cli.Command{
})
for _, stat := range stats {
printScope(&stat.stat, name+stat.name)
tmp := stat.stat
printScope(&tmp, name+stat.name)
}
}

View File

@ -119,7 +119,7 @@ func GetAPIInfoMulti(ctx *cli.Context, t repo.RepoType) ([]APIInfo, error) {
}
}
return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v. Try setting environment variable: %s", t.Type(), primaryEnv)
}
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
@ -164,6 +164,28 @@ func GetRawAPIMulti(ctx *cli.Context, t repo.RepoType, version string) ([]HttpHe
return httpHeads, nil
}
func GetRawAPIMultiV2(ctx *cli.Context, ainfoCfg []string, version string) ([]HttpHead, error) {
var httpHeads []HttpHead
if len(ainfoCfg) == 0 {
return httpHeads, xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './lotus-provider config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './lotus-provider config set /tmp/base.toml'")
}
for _, i := range ainfoCfg {
ainfo := ParseApiInfo(i)
addr, err := ainfo.DialArgs(version)
if err != nil {
return httpHeads, xerrors.Errorf("could not get DialArgs: %w", err)
}
httpHeads = append(httpHeads, HttpHead{addr: addr, header: ainfo.AuthHeader()})
}
if IsVeryVerbose {
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, httpHeads[0].addr)
}
return httpHeads, nil
}
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
heads, err := GetRawAPIMulti(ctx, t, version)
if err != nil {
@ -393,6 +415,68 @@ func GetFullNodeAPIV1(ctx *cli.Context, opts ...GetFullNodeOption) (v1api.FullNo
return &v1API, finalCloser, nil
}
func GetFullNodeAPIV1LotusProvider(ctx *cli.Context, ainfoCfg []string, opts ...GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) {
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
return tn.(v1api.FullNode), func() {}, nil
}
var options GetFullNodeOptions
for _, opt := range opts {
opt(&options)
}
var rpcOpts []jsonrpc.Option
if options.ethSubHandler != nil {
rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.ethSubHandler), jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription"))
}
heads, err := GetRawAPIMultiV2(ctx, ainfoCfg, "v1")
if err != nil {
return nil, nil, err
}
if IsVeryVerbose {
_, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", heads[0].addr)
}
var fullNodes []api.FullNode
var closers []jsonrpc.ClientCloser
for _, head := range heads {
v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...)
if err != nil {
log.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error())
continue
}
fullNodes = append(fullNodes, v1api)
closers = append(closers, closer)
}
// When running in cluster mode and trying to establish connections to multiple nodes, fail
// if less than 2 lotus nodes are actually running
if len(heads) > 1 && len(fullNodes) < 2 {
return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node")
}
finalCloser := func() {
for _, c := range closers {
c()
}
}
var v1API api.FullNodeStruct
FullNodeProxy(fullNodes, &v1API)
v, err := v1API.Version(ctx.Context)
if err != nil {
return nil, nil, err
}
if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) {
return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion)
}
return &v1API, finalCloser, nil
}
type GetStorageMinerOptions struct {
PreferHttp bool
}

View File

@ -3,10 +3,10 @@ package main
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"math/rand"
"os"
"path/filepath"
"sync"
@ -547,7 +547,10 @@ var sealBenchCmd = &cli.Command{
}
var challenge [32]byte
rand.Read(challenge[:])
_, err = rand.Read(challenge[:])
if err != nil {
return err
}
beforePost := time.Now()
@ -777,9 +780,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par
start := time.Now()
log.Infof("[%d] Writing piece into sector...", i)
r := rand.New(rand.NewSource(100 + int64(i)))
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), r)
pi, err := sb.AddPiece(context.TODO(), sid, nil, abi.PaddedPieceSize(sectorSize).Unpadded(), rand.Reader)
if err != nil {
return nil, nil, err
}

View File

@ -308,7 +308,36 @@ var simplePreCommit2 = &cli.Command{
Name: "synthetic",
Usage: "generate synthetic PoRep proofs",
},
&cli.StringFlag{
Name: "external-pc2",
Usage: "command for computing PC2 externally",
},
},
Description: `Compute PreCommit2 inputs and seal a sector.
--external-pc2 can be used to compute the PreCommit2 inputs externally.
The flag behaves similarly to the related lotus-worker flag; using it in
lotus-bench may be useful for testing if the external PreCommit2 command is
invoked correctly.
The command will be called with a number of environment variables set:
* EXTSEAL_PC2_SECTOR_NUM: the sector number
* EXTSEAL_PC2_SECTOR_MINER: the miner id
* EXTSEAL_PC2_PROOF_TYPE: the proof type
* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes
* EXTSEAL_PC2_CACHE: the path to the cache directory
* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller)
* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json)
The command is expected to:
* Create cache sc-02-data-tree-r* files
* Create cache sc-02-data-tree-c* files
* Create cache p_aux / t_aux files
* Transform the sealed file in place
Example invocation of lotus-bench as external executor:
'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT'
`,
ArgsUsage: "[sealed] [cache] [pc1 out]",
Action: func(cctx *cli.Context) error {
ctx := cctx.Context
@ -333,7 +362,18 @@ var simplePreCommit2 = &cli.Command{
storiface.FTSealed: cctx.Args().Get(0),
storiface.FTCache: cctx.Args().Get(1),
}
sealer, err := ffiwrapper.New(pp)
var opts []ffiwrapper.FFIWrapperOpt
if cctx.IsSet("external-pc2") {
extSeal := ffiwrapper.ExternalSealer{
PreCommit2: ffiwrapper.MakeExternPrecommit2(cctx.String("external-pc2")),
}
opts = append(opts, ffiwrapper.WithExternalSealCalls(extSeal))
}
sealer, err := ffiwrapper.New(pp, opts...)
if err != nil {
return err
}
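The environment contract documented above is easy to smoke-test before wiring a real prover; a hypothetical stub executor that only echoes the variables and exits non-zero, so a misconfigured pipeline surfaces immediately (a real command must also produce the tree/p_aux/t_aux files and transform the sealed file):

package main

import (
	"fmt"
	"os"
)

func main() {
	for _, k := range []string{
		"EXTSEAL_PC2_SECTOR_NUM",
		"EXTSEAL_PC2_SECTOR_MINER",
		"EXTSEAL_PC2_PROOF_TYPE",
		"EXTSEAL_PC2_SECTOR_SIZE",
		"EXTSEAL_PC2_CACHE",
		"EXTSEAL_PC2_SEALED",
	} {
		fmt.Printf("%s=%s\n", k, os.Getenv(k))
	}
	// PC1 output is large (base64-encoded json); report only its length.
	fmt.Printf("EXTSEAL_PC2_PC1OUT bytes: %d\n", len(os.Getenv("EXTSEAL_PC2_PC1OUT")))
	os.Exit(1) // replace with real PC2 work before using in production
}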

View File

@ -120,6 +120,11 @@ var initCmd = &cli.Command{
Name: "from",
Usage: "select which address to send actor creation message from",
},
&cli.Uint64Flag{
Name: "confidence",
Usage: "number of block confirmations to wait for",
Value: build.MessageConfidence,
},
},
Subcommands: []*cli.Command{
restoreCmd,
@ -146,6 +151,8 @@ var initCmd = &cli.Command{
return xerrors.Errorf("failed to parse gas-price flag: %s", err)
}
confidence := cctx.Uint64("confidence")
symlink := cctx.Bool("symlink-imported-sectors")
if symlink {
log.Info("will attempt to symlink to imported sectors")
@ -265,7 +272,7 @@ var initCmd = &cli.Command{
}
}
if err := storageMinerInit(ctx, cctx, api, r, ssize, gasPrice); err != nil {
if err := storageMinerInit(ctx, cctx, api, r, ssize, gasPrice, confidence); err != nil {
log.Errorf("Failed to initialize lotus-miner: %+v", err)
path, err := homedir.Expand(repoPath)
if err != nil {
@ -414,7 +421,7 @@ func findMarketDealID(ctx context.Context, api v1api.FullNode, deal markettypes.
return 0, xerrors.New("deal not found")
}
func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error {
func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt, confidence uint64) error {
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
return err
@ -463,7 +470,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
si := paths.NewIndex(nil)
si := paths.NewMemIndex(nil)
lstor, err := paths.NewLocal(ctx, lr, si, nil)
if err != nil {
@ -501,7 +508,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
return xerrors.Errorf("failed to start up genesis miner: %w", err)
}
cerr := configureStorageMiner(ctx, api, a, peerid, gasPrice)
cerr := configureStorageMiner(ctx, api, a, peerid, gasPrice, confidence)
if err := m.Stop(ctx); err != nil {
log.Error("failed to shut down miner: ", err)
@ -541,13 +548,13 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
}
}
if err := configureStorageMiner(ctx, api, a, peerid, gasPrice); err != nil {
if err := configureStorageMiner(ctx, api, a, peerid, gasPrice, confidence); err != nil {
return xerrors.Errorf("failed to configure miner: %w", err)
}
addr = a
} else {
a, err := createStorageMiner(ctx, api, ssize, peerid, gasPrice, cctx)
a, err := createStorageMiner(ctx, api, ssize, peerid, gasPrice, confidence, cctx)
if err != nil {
return xerrors.Errorf("creating miner failed: %w", err)
}
@ -589,7 +596,7 @@ func makeHostKey(lr repo.LockedRepo) (crypto.PrivKey, error) {
return pk, nil
}
func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error {
func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt, confidence uint64) error {
mi, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getWorkerAddr returned bad address: %w", err)
@ -615,7 +622,7 @@ func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address
}
log.Info("Waiting for message: ", smsg.Cid())
ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
ret, err := api.StateWaitMsg(ctx, smsg.Cid(), confidence, lapi.LookbackNoLimit, true)
if err != nil {
return err
}
@ -627,7 +634,7 @@ func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address
return nil
}
func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.SectorSize, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) {
func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.SectorSize, peerid peer.ID, gasPrice types.BigInt, confidence uint64, cctx *cli.Context) (address.Address, error) {
var err error
var owner address.Address
if cctx.String("owner") != "" {
@ -679,7 +686,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.Secto
log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for worker init: %w", err)
}
@ -703,7 +710,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.Secto
log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for owner init: %w", err)
}
@ -752,7 +759,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.Secto
log.Infof("Pushed CreateMiner message: %s", signed.Cid())
log.Infof("Waiting for confirmation")
mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true)
if err != nil {
return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err)
}

View File

@ -80,8 +80,7 @@ var restoreCmd = &cli.Command{
}
log.Info("Configuring miner actor")
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil {
return err
}

View File

@ -105,7 +105,7 @@ var serviceCmd = &cli.Command{
if es.Contains(MarketsService) {
log.Info("Configuring miner actor")
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil {
if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil {
return err
}
}

View File

@ -559,7 +559,8 @@ var provingCheckProvableCmd = &cli.Command{
for parIdx, par := range partitions {
sectors := make(map[abi.SectorNumber]struct{})
sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK)
tmp := par.LiveSectors
sectorInfos, err := api.StateMinerSectors(ctx, addr, &tmp, types.EmptyTSK)
if err != nil {
return err
}

View File

@ -2290,7 +2290,7 @@ var sectorsCompactPartitionsCmd = &cli.Command{
if len(parts) <= 0 {
return fmt.Errorf("must include at least one partition to compact")
}
fmt.Printf("compacting %d paritions\n", len(parts))
fmt.Printf("compacting %d partitions\n", len(parts))
var makeMsgForPartitions func(partitionsBf bitfield.BitField) ([]*types.Message, error)
makeMsgForPartitions = func(partitionsBf bitfield.BitField) ([]*types.Message, error) {

View File

@ -0,0 +1,241 @@
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"github.com/BurntSushi/toml"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
)
var configCmd = &cli.Command{
Name: "config",
Usage: "Manage node config by layers. The layer 'base' will always be applied. ",
Subcommands: []*cli.Command{
configDefaultCmd,
configSetCmd,
configGetCmd,
configListCmd,
configViewCmd,
configRmCmd,
configMigrateCmd,
},
}
var configDefaultCmd = &cli.Command{
Name: "default",
Aliases: []string{"defaults"},
Usage: "Print default node config",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-comment",
Usage: "don't comment default values",
},
},
Action: func(cctx *cli.Context) error {
comment := !cctx.Bool("no-comment")
cfg, err := getDefaultConfig(comment)
if err != nil {
return err
}
fmt.Print(cfg)
return nil
},
}
func getDefaultConfig(comment bool) (string, error) {
c := config.DefaultLotusProvider()
cb, err := config.ConfigUpdate(c, nil, config.Commented(comment), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return "", err
}
return string(cb), nil
}
var configSetCmd = &cli.Command{
Name: "set",
Aliases: []string{"add", "update", "create"},
Usage: "Set a config layer or the base by providing a filename or stdin.",
ArgsUsage: "a layer's file name",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "title",
Usage: "title of the config layer (req'd for stdin)",
},
},
Action: func(cctx *cli.Context) error {
args := cctx.Args()
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
name := cctx.String("title")
var stream io.Reader = os.Stdin
if args.Len() != 1 {
if cctx.String("title") == "" {
return errors.New("must have a title for stdin, or a file name")
}
} else {
stream, err = os.Open(args.First())
if err != nil {
return fmt.Errorf("cannot open file %s: %w", args.First(), err)
}
if name == "" {
name = strings.Split(path.Base(args.First()), ".")[0]
}
}
bytes, err := io.ReadAll(stream)
if err != nil {
return fmt.Errorf("cannot read stream/file %w", err)
}
lp := config.DefaultLotusProvider() // ensure it's toml
_, err = toml.Decode(string(bytes), lp)
if err != nil {
return fmt.Errorf("cannot decode file: %w", err)
}
_ = lp
err = setConfig(db, name, string(bytes))
if err != nil {
return fmt.Errorf("unable to save config layer: %w", err)
}
fmt.Println("Layer " + name + " created/updated")
return nil
},
}
func setConfig(db *harmonydb.DB, name, config string) error {
_, err := db.Exec(context.Background(),
`INSERT INTO harmony_config (title, config) VALUES ($1, $2)
ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, config)
return err
}
var configGetCmd = &cli.Command{
Name: "get",
Aliases: []string{"cat", "show"},
Usage: "Get a config layer by name. You may want to pipe the output to a file, or use 'less'",
ArgsUsage: "layer name",
Action: func(cctx *cli.Context) error {
args := cctx.Args()
if args.Len() != 1 {
return fmt.Errorf("want 1 layer arg, got %d", args.Len())
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
cfg, err := getConfig(db, args.First())
if err != nil {
return err
}
fmt.Println(cfg)
return nil
},
}
func getConfig(db *harmonydb.DB, layer string) (string, error) {
var cfg string
err := db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&cfg)
if err != nil {
return "", err
}
return cfg, nil
}
var configListCmd = &cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List config layers you can get.",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
var res []string
err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`)
if err != nil {
return fmt.Errorf("unable to read from db: %w", err)
}
for _, r := range res {
fmt.Println(r)
}
return nil
},
}
var configRmCmd = &cli.Command{
Name: "remove",
Aliases: []string{"rm", "del", "delete"},
Usage: "Remove a named config layer.",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
args := cctx.Args()
if args.Len() != 1 {
return errors.New("must have exactly 1 arg for the layer name")
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First())
if err != nil {
return fmt.Errorf("unable to read from db: %w", err)
}
if ct == 0 {
return fmt.Errorf("no layer named %s", args.First())
}
return nil
},
}
var configViewCmd = &cli.Command{
Name: "interpret",
Aliases: []string{"view", "stacked", "stack"},
Usage: "Interpret stacked config layers by this version of lotus-provider, with system-generated comments.",
ArgsUsage: "a list of layers to be interpreted as the final config",
Flags: []cli.Flag{
&cli.StringSliceFlag{
Name: "layers",
Usage: "comma or space separated list of layers to be interpreted",
Value: cli.NewStringSlice("base"),
Required: true,
},
},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
lp, err := deps.GetConfig(cctx, db)
if err != nil {
return err
}
cb, err := config.ConfigUpdate(lp, config.DefaultLotusProvider(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return xerrors.Errorf("cannot interpret config: %w", err)
}
fmt.Println(string(cb))
return nil
},
}
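// Usage sketch (illustrative; the layer name "seal-overrides" is hypothetical):
//
//	lotus-provider config interpret --layers=base,seal-overrides > final.toml
//
// Layers are applied in the order given; a later layer overrides only the
// keys it sets, since each layer is decoded into the same struct.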

View File

@ -0,0 +1,282 @@
// Package deps provides the dependencies for the lotus provider node.
package deps
import (
"context"
"database/sql"
"encoding/base64"
"errors"
"fmt"
"net"
"net/http"
"os"
"strings"
"github.com/BurntSushi/toml"
"github.com/gbrlsnchs/jwt/v3"
ds "github.com/ipfs/go-datastore"
dssync "github.com/ipfs/go-datastore/sync"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-statestore"
"github.com/filecoin-project/lotus/api"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/alerting"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/provider"
"github.com/filecoin-project/lotus/storage/ctladdr"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("lotus-provider/deps")
func MakeDB(cctx *cli.Context) (*harmonydb.DB, error) {
dbConfig := config.HarmonyDB{
Username: cctx.String("db-user"),
Password: cctx.String("db-password"),
Hosts: strings.Split(cctx.String("db-host"), ","),
Database: cctx.String("db-name"),
Port: cctx.String("db-port"),
}
return harmonydb.NewFromConfig(dbConfig)
}
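// Example (illustrative hostnames): the db-* flags and their LOTUS_DB_* /
// LOTUS_HARMONYDB_* environment variables are defined on the top-level
// command, so any subcommand can reach a non-default Yugabyte cluster:
//
//	lotus-provider --db-host=yb1,yb2,yb3 --db-user=lotus config list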
type JwtPayload struct {
Allow []auth.Permission
}
func StorageAuth(apiKey string) (sealer.StorageAuth, error) {
if apiKey == "" {
return nil, xerrors.Errorf("no api key provided")
}
rawKey, err := base64.StdEncoding.DecodeString(apiKey)
if err != nil {
return nil, xerrors.Errorf("decoding api key: %w", err)
}
key := jwt.NewHS256(rawKey)
p := JwtPayload{
Allow: []auth.Permission{"admin"},
}
token, err := jwt.Sign(&p, key)
if err != nil {
return nil, err
}
headers := http.Header{}
headers.Add("Authorization", "Bearer "+string(token))
return sealer.StorageAuth(headers), nil
}
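// Sketch (illustrative): StorageAuth turns the shared StorageRPCSecret into a
// bearer token, letting sector-storage clients call another node's /remote
// endpoints. Host, port, and TOKEN below are hypothetical:
//
//	curl -H "Authorization: Bearer $TOKEN" http://10.0.0.5:12300/remote/...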
func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) {
var deps Deps
return &deps, deps.PopulateRemainingDeps(ctx, cctx, true)
}
type Deps struct {
Cfg *config.LotusProviderConfig
DB *harmonydb.DB
Full api.FullNode
Verif storiface.Verifier
LW *sealer.LocalWorker
As *ctladdr.AddressSelector
Maddrs []dtypes.MinerAddress
Stor *paths.Remote
Si *paths.DBIndex
LocalStore *paths.Local
ListenAddr string
}
const (
FlagRepoPath = "repo-path"
)
func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, makeRepo bool) error {
var err error
if makeRepo {
// Open repo
repoPath := cctx.String(FlagRepoPath)
fmt.Println("repopath", repoPath)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if !ok {
if err := r.Init(repo.Provider); err != nil {
return err
}
}
}
if deps.Cfg == nil {
deps.DB, err = MakeDB(cctx)
if err != nil {
return err
}
}
if deps.Cfg == nil {
// The config feeds into task runners & their helpers
deps.Cfg, err = GetConfig(cctx, deps.DB)
if err != nil {
return err
}
}
log.Debugw("config", "config", deps.Cfg)
if deps.Verif == nil {
deps.Verif = ffiwrapper.ProofVerifier
}
if deps.As == nil {
deps.As, err = provider.AddressSelector(&deps.Cfg.Addresses)()
if err != nil {
return err
}
}
if deps.Si == nil {
de, err := journal.ParseDisabledEvents(deps.Cfg.Journal.DisabledEvents)
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de)
if err != nil {
return err
}
go func() {
<-ctx.Done()
_ = j.Close()
}()
al := alerting.NewAlertingSystem(j)
deps.Si = paths.NewDBIndex(al, deps.DB)
}
if deps.Full == nil {
var fullCloser func()
cfgApiInfo := deps.Cfg.Apis.ChainApiInfo
if v := os.Getenv("FULLNODE_API_INFO"); v != "" {
cfgApiInfo = []string{v}
}
deps.Full, fullCloser, err = cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfgApiInfo)
if err != nil {
return err
}
go func() {
<-ctx.Done()
fullCloser()
}()
}
bls := &paths.BasicLocalStorage{
PathToJSON: cctx.String("storage-json"),
}
if deps.ListenAddr == "" {
listenAddr := cctx.String("listen")
const unspecifiedAddress = "0.0.0.0"
addressSlice := strings.Split(listenAddr, ":")
if ip := net.ParseIP(addressSlice[0]); ip != nil {
if ip.String() == unspecifiedAddress {
rip, err := deps.DB.GetRoutableIP()
if err != nil {
return err
}
deps.ListenAddr = rip + ":" + addressSlice[1]
}
}
}
if deps.LocalStore == nil {
deps.LocalStore, err = paths.NewLocal(ctx, bls, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"})
if err != nil {
return err
}
}
sa, err := StorageAuth(deps.Cfg.Apis.StorageRPCSecret)
if err != nil {
return xerrors.Errorf(`'%w' while parsing the config toml's
[Apis]
StorageRPCSecret=%v
Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, deps.Cfg.Apis.StorageRPCSecret)
}
if deps.Stor == nil {
deps.Stor = paths.NewRemote(deps.LocalStore, deps.Si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{})
}
if deps.LW == nil {
wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore()))
// todo localWorker isn't the abstraction layer we want to use here, we probably want to go straight to ffiwrapper
// maybe with a lotus-provider specific abstraction. LocalWorker does persistent call tracking which we probably
// don't need (ehh.. maybe we do, the async callback system may actually work decently well with harmonytask)
deps.LW = sealer.NewLocalWorker(sealer.WorkerConfig{}, deps.Stor, deps.LocalStore, deps.Si, nil, wstates)
}
if len(deps.Maddrs) == 0 {
for _, s := range deps.Cfg.Addresses.MinerAddresses {
addr, err := address.NewFromString(s)
if err != nil {
return err
}
deps.Maddrs = append(deps.Maddrs, dtypes.MinerAddress(addr))
}
}
fmt.Println("last line of populate")
return nil
}
func GetConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig, error) {
lp := config.DefaultLotusProvider()
have := []string{}
layers := cctx.StringSlice("layers")
for _, layer := range layers {
text := ""
err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
if err != nil {
if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
if layer == "base" {
return nil, errors.New(`lotus-provider defaults to a layer named 'base'.
Either use the 'from-miner' command or edit a base.toml and upload it with: lotus-provider config set base.toml`)
}
return nil, fmt.Errorf("missing layer '%s'", layer)
}
return nil, fmt.Errorf("could not read layer '%s': %w", layer, err)
}
meta, err := toml.Decode(text, &lp)
if err != nil {
return nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err)
}
for _, k := range meta.Keys() {
have = append(have, strings.Join(k, " "))
}
log.Infow("Using layer", "layer", layer, "config", lp)
}
_ = have // FUTURE: verify that required fields are here.
// If config includes 3rd-party config, consider JSONSchema as a way that
// 3rd-parties can dynamically include config requirements and we can
// validate the config. Because of layering, we must validate @ startup.
return lp, nil
}
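// Layering sketch (illustrative, not part of the build): decoding successive
// layers into one struct means each layer overrides only the keys it sets.
// baseTOML and webTOML are hypothetical strings holding stored layer text:
//
//	lp := config.DefaultLotusProvider()
//	_, _ = toml.Decode(baseTOML, lp) // layer "base" sets cluster-wide values
//	_, _ = toml.Decode(webTOML, lp)  // layer "web" wins for the keys it sets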

165
cmd/lotus-provider/main.go Normal file
View File

@ -0,0 +1,165 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
"runtime/pprof"
"syscall"
"github.com/fatih/color"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/lib/tracing"
"github.com/filecoin-project/lotus/node/repo"
)
var log = logging.Logger("main")
func SetupCloseHandler() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
fmt.Println("\r- Ctrl+C pressed in Terminal")
_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
panic(1)
}()
}
func main() {
SetupCloseHandler()
lotuslog.SetupLogLevels()
local := []*cli.Command{
//initCmd,
runCmd,
stopCmd,
configCmd,
testCmd,
webCmd,
//backupCmd,
//lcli.WithCategory("chain", actorCmd),
//lcli.WithCategory("storage", sectorsCmd),
//lcli.WithCategory("storage", provingCmd),
//lcli.WithCategory("storage", storageCmd),
//lcli.WithCategory("storage", sealingCmd),
}
jaeger := tracing.SetupJaegerTracing("lotus")
defer func() {
if jaeger != nil {
_ = jaeger.ForceFlush(context.Background())
}
}()
for _, cmd := range local {
cmd := cmd
originBefore := cmd.Before
cmd.Before = func(cctx *cli.Context) error {
if jaeger != nil {
_ = jaeger.Shutdown(cctx.Context)
}
jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name)
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
if originBefore != nil {
return originBefore(cctx)
}
return nil
}
}
app := &cli.App{
Name: "lotus-provider",
Usage: "Filecoin decentralized storage network provider",
Version: build.UserVersion(),
EnableBashCompletion: true,
Flags: []cli.Flag{
&cli.BoolFlag{
// examined in the Before above
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
&cli.StringFlag{
Name: "panic-reports",
EnvVars: []string{"LOTUS_PANIC_REPORT_PATH"},
Hidden: true,
Value: "~/.lotusprovider", // should follow --repo default
},
&cli.StringFlag{
Name: "db-host",
EnvVars: []string{"LOTUS_DB_HOST"},
Usage: "Command separated list of hostnames for yugabyte cluster",
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-name",
EnvVars: []string{"LOTUS_DB_NAME", "LOTUS_HARMONYDB_HOSTS"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-user",
EnvVars: []string{"LOTUS_DB_USER", "LOTUS_HARMONYDB_USERNAME"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-password",
EnvVars: []string{"LOTUS_DB_PASSWORD", "LOTUS_HARMONYDB_PASSWORD"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-port",
EnvVars: []string{"LOTUS_DB_PORT", "LOTUS_HARMONYDB_PORT"},
Hidden: true,
Value: "5433",
},
&cli.StringFlag{
Name: "layers",
EnvVars: []string{"LOTUS_LAYERS", "LOTUS_CONFIG_LAYERS"},
Value: "base",
},
&cli.StringFlag{
Name: deps.FlagRepoPath,
EnvVars: []string{"LOTUS_REPO_PATH"},
Value: "~/.lotusprovider",
},
cliutil.FlagVeryVerbose,
},
Commands: append(local, lcli.CommonCommands...),
Before: func(c *cli.Context) error {
return nil
},
After: func(c *cli.Context) error {
if r := recover(); r != nil {
p, err := homedir.Expand(c.String(deps.FlagRepoPath))
if err != nil {
log.Errorw("could not expand repo path for panic report", "error", err)
panic(r)
}
// Generate report in LOTUS_PATH and re-raise panic
build.GeneratePanicReport(c.String("panic-reports"), p, c.App.Name)
panic(r)
}
return nil
},
}
app.Setup()
app.Metadata["repoType"] = repo.Provider
lcli.RunApp(app)
}

View File

@ -0,0 +1,247 @@
package main
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"path"
"strings"
"github.com/BurntSushi/toml"
"github.com/fatih/color"
"github.com/ipfs/go-datastore"
"github.com/samber/lo"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/repo"
)
var configMigrateCmd = &cli.Command{
Name: "from-miner",
Usage: "Express a database config (for lotus-provider) from an existing miner.",
Description: "Express a database config (for lotus-provider) from an existing miner.",
Flags: []cli.Flag{
&cli.StringFlag{
Name: FlagMinerRepo,
Aliases: []string{FlagMinerRepoDeprecation},
EnvVars: []string{"LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH"},
Value: "~/.lotusminer",
Usage: fmt.Sprintf("Specify miner repo path. flag(%s) and env(LOTUS_STORAGE_PATH) are DEPRECATION, will REMOVE SOON", FlagMinerRepoDeprecation),
},
&cli.StringFlag{
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus",
},
&cli.StringFlag{
Name: "to-layer",
Aliases: []string{"t"},
Usage: "The layer name for this data push. 'base' is recommended for single-miner setup.",
},
&cli.BoolFlag{
Name: "overwrite",
Aliases: []string{"o"},
Usage: "Use this with --to-layer to replace an existing layer",
},
},
Action: fromMiner,
}
const (
FlagMinerRepo = "miner-repo"
)
const FlagMinerRepoDeprecation = "storagerepo"
func fromMiner(cctx *cli.Context) (err error) {
ctx := context.Background()
cliCommandColor := color.New(color.FgHiBlue).SprintFunc()
configColor := color.New(color.FgHiGreen).SprintFunc()
r, err := repo.NewFS(cctx.String(FlagMinerRepo))
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if !ok {
return fmt.Errorf("repo not initialized")
}
lr, err := r.LockRO(repo.StorageMiner)
if err != nil {
return fmt.Errorf("locking repo: %w", err)
}
defer func() { _ = lr.Close() }()
cfgNode, err := lr.Config()
if err != nil {
return fmt.Errorf("getting node config: %w", err)
}
smCfg := cfgNode.(*config.StorageMiner)
db, err := harmonydb.NewFromConfig(smCfg.HarmonyDB)
if err != nil {
return fmt.Errorf("could not reach the database. Ensure the Miner config toml's HarmonyDB entry"+
" is setup to reach Yugabyte correctly: %w", err)
}
var titles []string
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
if err != nil {
return fmt.Errorf("miner cannot reach the db. Ensure the config toml's HarmonyDB entry"+
" is setup to reach Yugabyte correctly: %s", err.Error())
}
name := cctx.String("to-layer")
if name == "" {
name = fmt.Sprintf("mig%d", len(titles))
} else {
if lo.Contains(titles, name) && !cctx.Bool("overwrite") {
return errors.New("the overwrite flag is needed to replace existing layer: " + name)
}
}
msg := "Layer " + configColor(name) + ` created. `
// Copy over identical settings:
buf, err := os.ReadFile(path.Join(lr.Path(), "config.toml"))
if err != nil {
return fmt.Errorf("could not read config.toml: %w", err)
}
var lpCfg config.LotusProviderConfig
_, err = toml.Decode(string(buf), &lpCfg)
if err != nil {
return fmt.Errorf("could not decode toml: %w", err)
}
// Populate Miner Address
mmeta, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return xerrors.Errorf("opening miner metadata datastore: %w", err)
}
defer func() {
_ = mmeta.Close()
}()
maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting miner address datastore entry: %w", err)
}
addr, err := address.NewFromBytes(maddrBytes)
if err != nil {
return xerrors.Errorf("parsing miner actor address: %w", err)
}
lpCfg.Addresses.MinerAddresses = []string{addr.String()}
ks, err := lr.KeyStore()
if err != nil {
return xerrors.Errorf("keystore err: %w", err)
}
js, err := ks.Get(modules.JWTSecretName)
if err != nil {
return xerrors.Errorf("error getting JWTSecretName: %w", err)
}
lpCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey)
// Populate API Key
_, header, err := cliutil.GetRawAPI(cctx, repo.FullNode, "v0")
if err != nil {
return fmt.Errorf("cannot read API: %w", err)
}
ainfo, err := cliutil.GetAPIInfo(&cli.Context{}, repo.FullNode)
if err != nil {
return xerrors.Errorf(`could not get API info for FullNode: %w
Set FULLNODE_API_INFO to the value of "lotus auth api-info --perm=admin"`, err)
}
lpCfg.Apis.ChainApiInfo = []string{header.Get("Authorization")[7:] + ":" + ainfo.Addr}
// Enable WindowPoSt
lpCfg.Subsystems.EnableWindowPost = true
msg += "\nBefore running lotus-provider, ensure any miner/worker answering of WindowPost is disabled by " +
"(on Miner) " + configColor("DisableBuiltinWindowPoSt=true") + " and (on Workers) not enabling windowpost on CLI or via " +
"environment variable " + configColor("LOTUS_WORKER_WINDOWPOST") + "."
// Express as configTOML
configTOML := &bytes.Buffer{}
if err = toml.NewEncoder(configTOML).Encode(lpCfg); err != nil {
return err
}
if !lo.Contains(titles, "base") {
cfg, err := getDefaultConfig(true)
if err != nil {
return xerrors.Errorf("Cannot get default config: %w", err)
}
_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg)
if err != nil {
return err
}
}
if cctx.Bool("overwrite") {
i, err := db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", name)
if i != 0 {
fmt.Println("Overwriting existing layer")
}
if err != nil {
fmt.Println("Got error while deleting existing layer: " + err.Error())
}
}
_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", name, configTOML.String())
if err != nil {
return err
}
dbSettings := ""
def := config.DefaultStorageMiner().HarmonyDB
if def.Hosts[0] != smCfg.HarmonyDB.Hosts[0] {
dbSettings += ` --db-host="` + strings.Join(smCfg.HarmonyDB.Hosts, ",") + `"`
}
if def.Port != smCfg.HarmonyDB.Port {
dbSettings += " --db-port=" + smCfg.HarmonyDB.Port
}
if def.Username != smCfg.HarmonyDB.Username {
dbSettings += ` --db-user="` + smCfg.HarmonyDB.Username + `"`
}
if def.Password != smCfg.HarmonyDB.Password {
dbSettings += ` --db-password="` + smCfg.HarmonyDB.Password + `"`
}
if def.Database != smCfg.HarmonyDB.Database {
dbSettings += ` --db-name="` + smCfg.HarmonyDB.Database + `"`
}
var layerMaybe string
if name != "base" {
layerMaybe = "--layer=" + name
}
msg += `
To work with the config:
` + cliCommandColor(`lotus-provider `+dbSettings+` config help `)
msg += `
To run Lotus Provider on its own machine or cgroup, without other lotus processes, use the command:
` + cliCommandColor(`lotus-provider `+dbSettings+` run `+layerMaybe)
fmt.Println(msg)
return nil
}
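// Example invocation (illustrative layer name; "mig0" matches the automatic
// mig%d naming used above):
//
//	lotus-provider config from-miner --to-layer=mig0 --overwrite
//
// This reads the miner's config.toml and keystore, and pushes the equivalent
// lotus-provider settings to the named layer in HarmonyDB.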

View File

@ -0,0 +1,207 @@
package main
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"os"
"time"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/provider"
)
var testCmd = &cli.Command{
Name: "test",
Usage: "Utility functions for testing",
Subcommands: []*cli.Command{
//provingInfoCmd,
wdPostCmd,
},
}
var wdPostCmd = &cli.Command{
Name: "window-post",
Aliases: []string{"wd", "windowpost", "wdpost"},
Usage: "Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain.",
Subcommands: []*cli.Command{
wdPostHereCmd,
wdPostTaskCmd,
},
}
// wdPostTaskCmd writes to harmony_task and wdpost_partition_tasks, then waits for the result.
// It is intended to be used to test the windowpost scheduler.
// The end of the compute task puts the task_id onto wdpost_proofs, which is read by the submit task.
// The submit task will not send test tasks to the chain, and instead will write the result to harmony_test.
// The result is read by this command, and printed to stdout.
var wdPostTaskCmd = &cli.Command{
Name: "task",
Aliases: []string{"scheduled", "schedule", "async", "asynchronous"},
Usage: "Test the windowpost scheduler by running it on the next available lotus-provider. ",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "deadline",
Usage: "deadline to compute WindowPoSt for ",
Value: 0,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
Value: cli.NewStringSlice("base"),
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
deps, err := deps.GetDeps(ctx, cctx)
if err != nil {
return err
}
ts, err := deps.Full.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("cannot get chainhead %w", err)
}
ht := ts.Height()
addr, err := address.NewFromString(deps.Cfg.Addresses.MinerAddresses[0])
if err != nil {
return xerrors.Errorf("cannot get miner address %w", err)
}
maddr, err := address.IDFromAddress(addr)
if err != nil {
return xerrors.Errorf("cannot get miner id %w", err)
}
var id int64
retryDelay := time.Millisecond * 10
retryAddTask:
_, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&id)
if err != nil {
log.Error("inserting harmony_task: ", err)
return false, xerrors.Errorf("inserting harmony_task: %w", err)
}
_, err = tx.Exec(`INSERT INTO wdpost_partition_tasks
(task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`,
id, maddr, ht, cctx.Uint64("deadline"), 0)
if err != nil {
log.Error("inserting wdpost_partition_tasks: ", err)
return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err)
}
_, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", id)
if err != nil {
return false, xerrors.Errorf("inserting into harmony_tests: %w", err)
}
return true, nil
})
if err != nil {
if harmonydb.IsErrSerialization(err) {
time.Sleep(retryDelay)
retryDelay *= 2
goto retryAddTask
}
return xerrors.Errorf("writing SQL transaction: %w", err)
}
fmt.Printf("Inserted task %v. Waiting for success ", id)
var result sql.NullString
for {
time.Sleep(time.Second)
err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, id).Scan(&result)
if err != nil {
return xerrors.Errorf("reading result from harmony_test: %w", err)
}
if result.Valid {
break
}
fmt.Print(".")
}
log.Infof("Result: %s", result.String)
return nil
},
}
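// Example (illustrative): schedule a test WindowPoSt for deadline 3 and wait
// for the scheduler on any lotus-provider in the cluster to report a result:
//
//	lotus-provider test window-post task --deadline=3 --layers=base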
// This command is intended to be used to verify PoSt compute performance.
// It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.
// The entire processing happens in this process while you wait. It does not use the scheduler.
var wdPostHereCmd = &cli.Command{
Name: "here",
Aliases: []string{"cli"},
Usage: "Compute WindowPoSt for performance and configuration testing.",
Description: `Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.`,
ArgsUsage: "[deadline index]",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "deadline",
Usage: "deadline to compute WindowPoSt for ",
Value: 0,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
Value: cli.NewStringSlice("base"),
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.lotus-provider/storage.json",
},
&cli.Uint64Flag{
Name: "partition",
Usage: "partition to compute WindowPoSt for",
Value: 0,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
deps, err := deps.GetDeps(ctx, cctx)
if err != nil {
return err
}
wdPostTask, wdPoStSubmitTask, declareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, deps.LW, nil,
deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks)
if err != nil {
return err
}
_, _ = wdPoStSubmitTask, declareRecoverTask
if len(deps.Maddrs) == 0 {
return errors.New("no miners to compute WindowPoSt for")
}
head, err := deps.Full.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("failed to get chain head: %w", err)
}
di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0)
for _, maddr := range deps.Maddrs {
out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition"))
if err != nil {
fmt.Println("Error computing WindowPoSt for miner", maddr, err)
continue
}
fmt.Println("Computed WindowPoSt for miner", maddr, ":")
err = json.NewEncoder(os.Stdout).Encode(out)
if err != nil {
fmt.Println("Could not encode WindowPoSt output for miner", maddr, err)
continue
}
}
return nil
},
}
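// Example (illustrative): compute deadline 0, partition 0 in-process and dump
// the proof output as JSON:
//
//	lotus-provider test window-post here --deadline=0 --partition=0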

View File

@ -0,0 +1,156 @@
// Package rpc provides all direct access to this node.
package rpc
import (
"context"
"encoding/base64"
"encoding/json"
"net"
"net/http"
"time"
"github.com/gbrlsnchs/jwt/v3"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/tag"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/cmd/lotus-provider/web"
"github.com/filecoin-project/lotus/lib/rpcenc"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/metrics/proxy"
"github.com/filecoin-project/lotus/storage/paths"
)
var log = logging.Logger("lp/rpc")
func LotusProviderHandler(
authv func(ctx context.Context, token string) ([]auth.Permission, error),
remote http.HandlerFunc,
a api.LotusProvider,
permissioned bool) http.Handler {
mux := mux.NewRouter()
readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt)
wapi := proxy.MetricedAPI[api.LotusProvider, api.LotusProviderStruct](a)
if permissioned {
wapi = api.PermissionedAPI[api.LotusProvider, api.LotusProviderStruct](wapi)
}
rpcServer.Register("Filecoin", wapi)
rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover")
mux.Handle("/rpc/v0", rpcServer)
mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
mux.PathPrefix("/remote").HandlerFunc(remote)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
if !permissioned {
return mux
}
ah := &auth.Handler{
Verify: authv,
Next: mux.ServeHTTP,
}
return ah
}
type ProviderAPI struct {
*deps.Deps
ShutdownChan chan struct{}
}
func (p *ProviderAPI) Version(context.Context) (api.Version, error) {
return api.ProviderAPIVersion0, nil
}
// Trigger shutdown
func (p *ProviderAPI) Shutdown(context.Context) error {
close(p.ShutdownChan)
return nil
}
func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error {
fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}}
remoteHandler := func(w http.ResponseWriter, r *http.Request) {
if !auth.HasPerm(r.Context(), nil, api.PermAdmin) {
w.WriteHeader(401)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"})
return
}
fh.ServeHTTP(w, r)
}
// local APIs
{
// debugging
mux := mux.NewRouter()
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
mux.PathPrefix("/remote").HandlerFunc(remoteHandler)
}
var authVerify func(context.Context, string) ([]auth.Permission, error)
{
privateKey, err := base64.StdEncoding.DecodeString(dependencies.Cfg.Apis.StorageRPCSecret)
if err != nil {
return xerrors.Errorf("decoding storage rpc secret: %w", err)
}
authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) {
var payload deps.JwtPayload
if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil {
return nil, xerrors.Errorf("JWT Verification failed: %w", err)
}
return payload.Allow, nil
}
}
// Serve the RPC.
srv := &http.Server{
Handler: LotusProviderHandler(
authVerify,
remoteHandler,
&ProviderAPI{dependencies, shutdownChan},
true),
ReadHeaderTimeout: time.Minute * 3,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker"))
return ctx
},
Addr: dependencies.ListenAddr,
}
log.Infof("Setting up RPC server at %s", dependencies.ListenAddr)
eg := errgroup.Group{}
eg.Go(srv.ListenAndServe)
if dependencies.Cfg.Subsystems.EnableWebGui {
web, err := web.GetSrv(ctx, dependencies)
if err != nil {
return err
}
go func() {
<-ctx.Done()
log.Warn("Shutting down...")
if err := srv.Shutdown(context.TODO()); err != nil {
log.Errorf("shutting down RPC server failed: %s", err)
}
if err := web.Shutdown(context.Background()); err != nil {
log.Errorf("shutting down web server failed: %s", err)
}
log.Warn("Graceful shutdown successful")
}()
log.Infof("Setting up web server at %s", dependencies.Cfg.Subsystems.GuiAddress)
eg.Go(web.ListenAndServe)
}
return eg.Wait()
}

194
cmd/lotus-provider/run.go Normal file
View File

@ -0,0 +1,194 @@
package main
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/cmd/lotus-provider/rpc"
"github.com/filecoin-project/lotus/cmd/lotus-provider/tasks"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
)
type stackTracer interface {
StackTrace() errors.StackTrace
}
var runCmd = &cli.Command{
Name: "run",
Usage: "Start a lotus provider process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "listen",
Usage: "host address and port the worker api will listen on",
Value: "0.0.0.0:12300",
EnvVars: []string{"LOTUS_WORKER_LISTEN"},
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
&cli.BoolFlag{
Name: "halt-after-init",
Usage: "only run init, then return",
Hidden: true,
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
Value: cli.NewStringSlice("base"),
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.lotus-provider/storage.json",
},
&cli.StringFlag{
Name: "journal",
Usage: "path to journal files",
Value: "~/.lotus-provider/",
},
},
Action: func(cctx *cli.Context) (err error) {
defer func() {
if err != nil {
if err, ok := err.(stackTracer); ok {
for _, f := range err.StackTrace() {
fmt.Printf("%+s:%d\n", f, f)
}
}
}
}()
if !cctx.Bool("enable-gpu-proving") {
err := os.Setenv("BELLMAN_NO_GPU", "true")
if err != nil {
return err
}
}
ctx, _ := tag.New(lcli.DaemonContext(cctx),
tag.Insert(metrics.Version, build.BuildVersion),
tag.Insert(metrics.Commit, build.CurrentCommit),
tag.Insert(metrics.NodeType, "provider"),
)
shutdownChan := make(chan struct{})
{
var ctxclose func()
ctx, ctxclose = context.WithCancel(ctx)
go func() {
<-shutdownChan
ctxclose()
}()
}
// Register all metric views
/*
if err := view.Register(
metrics.MinerNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
*/
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
dependencies := &deps.Deps{}
err = dependencies.PopulateRemainingDeps(ctx, cctx, true)
if err != nil {
fmt.Println("err", err)
return err
}
fmt.Println("ef")
taskEngine, err := tasks.StartTasks(ctx, dependencies)
fmt.Println("gh")
if err != nil {
return nil
}
defer taskEngine.GracefullyTerminate(time.Hour)
err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown.
if err != nil {
return err
}
finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
//node.ShutdownHandler{Component: "provider", StopFunc: stop},
<-finishCh
return nil
},
}
var webCmd = &cli.Command{
Name: "web",
Usage: "Start lotus provider web interface",
Description: `Start an instance of lotus provider web interface.
This creates the 'web' layer if it does not exist, then calls run with that layer.`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "listen",
Usage: "Address to listen on",
Value: "127.0.0.1:4701",
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base. Web will be added",
Value: cli.NewStringSlice("base"),
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
webtxt, err := getConfig(db, "web")
if err != nil || webtxt == "" {
cfg := config.DefaultLotusProvider()
cfg.Subsystems.EnableWebGui = true
var b bytes.Buffer
if err = toml.NewEncoder(&b).Encode(cfg); err != nil {
return err
}
if err = setConfig(db, "web", b.String()); err != nil {
return err
}
}
layers := append([]string{"web"}, cctx.StringSlice("layers")...)
err = cctx.Set("layers", strings.Join(layers, ","))
if err != nil {
return err
}
return runCmd.Action(cctx)
},
}
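// Example (illustrative): serve the GUI using the stored 'web' layer stacked
// on top of 'base':
//
//	lotus-provider web --layers=base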

View File

@ -0,0 +1,29 @@
package main
import (
_ "net/http/pprof"
"github.com/urfave/cli/v2"
lcli "github.com/filecoin-project/lotus/cli"
)
var stopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running lotus provider",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}

View File

@ -0,0 +1,58 @@
// Package tasks contains tasks that can be run by the lotus-provider command.
package tasks
import (
"context"
logging "github.com/ipfs/go-log/v2"
"github.com/samber/lo"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
"github.com/filecoin-project/lotus/provider"
"github.com/filecoin-project/lotus/provider/lpmessage"
"github.com/filecoin-project/lotus/provider/lpwinning"
)
var log = logging.Logger("lotus-provider/deps")
func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.TaskEngine, error) {
cfg := dependencies.Cfg
db := dependencies.DB
full := dependencies.Full
verif := dependencies.Verif
lw := dependencies.LW
as := dependencies.As
maddrs := dependencies.Maddrs
stor := dependencies.Stor
si := dependencies.Si
var activeTasks []harmonytask.TaskInterface
sender, sendTask := lpmessage.NewSender(full, full, db)
activeTasks = append(activeTasks, sendTask)
///////////////////////////////////////////////////////////////////////
///// Task Selection
///////////////////////////////////////////////////////////////////////
{
if cfg.Subsystems.EnableWindowPost {
wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender,
as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks)
if err != nil {
return nil, err
}
activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, derlareRecoverTask)
}
if cfg.Subsystems.EnableWinningPost {
winPoStTask := lpwinning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs)
activeTasks = append(activeTasks, winPoStTask)
}
}
log.Infow("This lotus_provider instance handles",
"miner_addresses", maddrs,
"tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name }))
return harmonytask.New(db, activeTasks, dependencies.ListenAddr)
}
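// Config sketch (illustrative TOML for a layer that opts this node into both
// PoSt task types read above):
//
//	[Subsystems]
//	EnableWindowPost = true
//	EnableWinningPost = true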

View File

@ -0,0 +1,229 @@
// Package debug provides the API for various debug endpoints in lotus-provider.
package debug
import (
"context"
"encoding/json"
"fmt"
"net/http"
"sort"
"sync"
"time"
"github.com/BurntSushi/toml"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/build"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
)
var log = logging.Logger("lp/web/debug")
type debug struct {
*deps.Deps
}
func Routes(r *mux.Router, deps *deps.Deps) {
d := debug{deps}
r.HandleFunc("/chain-state-sse", d.chainStateSSE)
}
type rpcInfo struct {
Address string
CLayers []string
Reachable bool
SyncState string
Version string
}
func (d *debug) chainStateSSE(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
ctx := r.Context()
for {
type minimalApiInfo struct {
Apis struct {
ChainApiInfo []string
}
}
rpcInfos := map[string]minimalApiInfo{} // config name -> api info
confNameToAddr := map[string]string{} // config name -> api address
err := forEachConfig[minimalApiInfo](d, func(name string, info minimalApiInfo) error {
if len(info.Apis.ChainApiInfo) == 0 {
return nil
}
rpcInfos[name] = info
for _, addr := range info.Apis.ChainApiInfo {
ai := cliutil.ParseApiInfo(addr)
confNameToAddr[name] = ai.Addr
}
return nil
})
if err != nil {
log.Errorw("getting api info", "error", err)
return
}
dedup := map[string]bool{} // for dedup by address
infos := map[string]rpcInfo{} // api address -> rpc info
var infosLk sync.Mutex
var wg sync.WaitGroup
for _, info := range rpcInfos {
ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0])
if dedup[ai.Addr] {
continue
}
dedup[ai.Addr] = true
wg.Add(1)
go func() {
defer wg.Done()
var clayers []string
for layer, a := range confNameToAddr {
if a == ai.Addr {
clayers = append(clayers, layer)
}
}
myinfo := rpcInfo{
Address: ai.Addr,
Reachable: false,
CLayers: clayers,
}
defer func() {
infosLk.Lock()
defer infosLk.Unlock()
infos[ai.Addr] = myinfo
}()
da, err := ai.DialArgs("v1")
if err != nil {
log.Warnw("DialArgs", "error", err)
return
}
ah := ai.AuthHeader()
v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah)
if err != nil {
log.Warnf("Not able to establish connection to node with addr: %s", ai.Addr)
return
}
defer closer()
ver, err := v1api.Version(ctx)
if err != nil {
log.Warnw("Version", "error", err)
return
}
head, err := v1api.ChainHead(ctx)
if err != nil {
log.Warnw("ChainHead", "error", err)
return
}
var syncState string
switch {
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs
syncState = "ok"
case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs
syncState = fmt.Sprintf("slow (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))
default:
syncState = fmt.Sprintf("behind (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second))
}
myinfo = rpcInfo{
Address: ai.Addr,
CLayers: clayers,
Reachable: true,
Version: ver.Version,
SyncState: syncState,
}
}()
}
wg.Wait()
var infoList []rpcInfo
for _, i := range infos {
infoList = append(infoList, i)
}
sort.Slice(infoList, func(i, j int) bool {
return infoList[i].Address < infoList[j].Address
})
fmt.Fprintf(w, "data: ")
err = json.NewEncoder(w).Encode(&infoList)
if err != nil {
log.Warnw("json encode", "error", err)
return
}
fmt.Fprintf(w, "\n\n")
if f, ok := w.(http.Flusher); ok {
f.Flush()
}
time.Sleep(time.Duration(build.BlockDelaySecs) * time.Second)
select { // stop running if there is no longer a reader.
case <-ctx.Done():
return
default:
}
}
}
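// Example (illustrative; assumes the default GUI address): the stream can be
// watched directly with curl, emitting one JSON array per block-delay tick:
//
//	curl -N http://127.0.0.1:4701/api/debug/chain-state-sse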
func forEachConfig[T any](a *debug, cb func(name string, v T) error) error {
confs, err := a.loadConfigs(context.Background())
if err != nil {
return err
}
for name, tomlStr := range confs { // todo for-each-config
var info T
if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil {
return xerrors.Errorf("unmarshaling %s config: %w", name, err)
}
if err := cb(name, info); err != nil {
return xerrors.Errorf("cb: %w", err)
}
}
return nil
}
func (d *debug) loadConfigs(ctx context.Context) (map[string]string, error) {
//err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
rows, err := d.DB.Query(ctx, `SELECT title, config FROM harmony_config`)
if err != nil {
return nil, xerrors.Errorf("getting db configs: %w", err)
}
configs := make(map[string]string)
for rows.Next() {
var title, config string
if err := rows.Scan(&title, &config); err != nil {
return nil, xerrors.Errorf("scanning db configs: %w", err)
}
configs[title] = config
}
return configs, nil
}

View File

@ -0,0 +1,13 @@
// Package api provides the HTTP API for the lotus provider web gui.
package api
import (
"github.com/gorilla/mux"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/cmd/lotus-provider/web/api/debug"
)
func Routes(r *mux.Router, deps *deps.Deps) {
debug.Routes(r.PathPrefix("/debug").Subrouter(), deps)
}

View File

@ -0,0 +1,35 @@
package hapi
import (
"embed"
"html/template"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
)
//go:embed web/*
var templateFS embed.FS
func Routes(r *mux.Router, deps *deps.Deps) error {
t, err := template.ParseFS(templateFS, "web/*")
if err != nil {
return xerrors.Errorf("parse templates: %w", err)
}
a := &app{
db: deps.DB,
t: t,
}
r.HandleFunc("/simpleinfo/actorsummary", a.actorSummary)
r.HandleFunc("/simpleinfo/machines", a.indexMachines)
r.HandleFunc("/simpleinfo/tasks", a.indexTasks)
r.HandleFunc("/simpleinfo/taskhistory", a.indexTasksHistory)
return nil
}
var log = logging.Logger("lpweb")

View File

@ -0,0 +1,187 @@
package hapi
import (
"context"
"html/template"
"net/http"
"os"
"sync"
"time"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
type app struct {
db *harmonydb.DB
t *template.Template
actorInfoLk sync.Mutex
actorInfos []actorInfo
}
type actorInfo struct {
Address string
CLayers []string
QualityAdjustedPower string
RawBytePower string
Deadlines []actorDeadline
}
type actorDeadline struct {
Empty bool
Current bool
Proven bool
PartFaulty bool
Faulty bool
}
func (a *app) actorSummary(w http.ResponseWriter, r *http.Request) {
a.actorInfoLk.Lock()
defer a.actorInfoLk.Unlock()
a.executeTemplate(w, "actor_summary", a.actorInfos)
}
func (a *app) indexMachines(w http.ResponseWriter, r *http.Request) {
s, err := a.clusterMachineSummary(r.Context())
if err != nil {
log.Errorf("cluster machine summary: %v", err)
http.Error(w, "internal server error", http.StatusInternalServerError)
return
}
a.executeTemplate(w, "cluster_machines", s)
}
func (a *app) indexTasks(w http.ResponseWriter, r *http.Request) {
s, err := a.clusterTaskSummary(r.Context())
if err != nil {
log.Errorf("cluster task summary: %v", err)
http.Error(w, "internal server error", http.StatusInternalServerError)
return
}
a.executeTemplate(w, "cluster_tasks", s)
}
func (a *app) indexTasksHistory(w http.ResponseWriter, r *http.Request) {
s, err := a.clusterTaskHistorySummary(r.Context())
if err != nil {
log.Errorf("cluster task history summary: %v", err)
http.Error(w, "internal server error", http.StatusInternalServerError)
return
}
a.executeTemplate(w, "cluster_task_history", s)
}
var templateDev = os.Getenv("LOTUS_WEB_DEV") == "1"
func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
if templateDev {
fs := os.DirFS("./cmd/lotus-provider/web/hapi/web")
a.t = template.Must(template.ParseFS(fs, "web/*"))
}
if err := a.t.ExecuteTemplate(w, name, data); err != nil {
log.Errorf("execute template %s: %v", name, err)
http.Error(w, "internal server error", http.StatusInternalServerError)
}
}
type machineSummary struct {
Address string
ID int64
SinceContact string
}
type taskSummary struct {
Name string
SincePosted string
Owner *string
ID int64
}
type taskHistorySummary struct {
Name string
TaskID int64
Posted, Start, End string
Result bool
Err string
CompletedBy string
}
func (a *app) clusterMachineSummary(ctx context.Context) ([]machineSummary, error) {
rows, err := a.db.Query(ctx, "SELECT id, host_and_port, last_contact FROM harmony_machines")
if err != nil {
return nil, err // Handle error
}
defer rows.Close()
var summaries []machineSummary
for rows.Next() {
var m machineSummary
var lastContact time.Time
if err := rows.Scan(&m.ID, &m.Address, &lastContact); err != nil {
return nil, err // Handle error
}
m.SinceContact = time.Since(lastContact).Round(time.Second).String()
summaries = append(summaries, m)
}
return summaries, nil
}
func (a *app) clusterTaskSummary(ctx context.Context) ([]taskSummary, error) {
rows, err := a.db.Query(ctx, "SELECT id, name, update_time, owner_id FROM harmony_task")
if err != nil {
return nil, err // Handle error
}
defer rows.Close()
var summaries []taskSummary
for rows.Next() {
var t taskSummary
var posted time.Time
if err := rows.Scan(&t.ID, &t.Name, &posted, &t.Owner); err != nil {
return nil, err // Handle error
}
t.SincePosted = time.Since(posted).Round(time.Second).String()
summaries = append(summaries, t)
}
return summaries, nil
}
func (a *app) clusterTaskHistorySummary(ctx context.Context) ([]taskHistorySummary, error) {
rows, err := a.db.Query(ctx, "SELECT id, name, task_id, posted, work_start, work_end, result, err, completed_by_host_and_port FROM harmony_task_history ORDER BY work_end DESC LIMIT 15")
if err != nil {
return nil, err // Handle error
}
defer rows.Close()
var summaries []taskHistorySummary
for rows.Next() {
var t taskHistorySummary
var posted, start, end time.Time
if err := rows.Scan(&t.TaskID, &t.Name, &t.TaskID, &posted, &start, &end, &t.Result, &t.Err, &t.CompletedBy); err != nil {
return nil, err // Handle error
}
t.Posted = posted.Round(time.Second).Format("02 Jan 06 15:04")
t.Start = start.Round(time.Second).Format("02 Jan 06 15:04")
t.End = end.Round(time.Second).Format("02 Jan 06 15:04")
summaries = append(summaries, t)
}
return summaries, nil
}

View File

@ -0,0 +1,20 @@
{{define "actor_summary"}}
{{range .}}
<tr>
<td>{{.Address}}</td>
<td>
{{range .CLayers}}
<span>{{.}} </span>
{{end}}
</td>
<td>{{.QualityAdjustedPower}}</td>
<td>
<div class="deadline-box">
{{range .Deadlines}}
<div class="deadline-entry{{if .Current}} deadline-entry-cur{{end}}{{if .Proven}} deadline-proven{{end}}{{if .PartFaulty}} deadline-partially-faulty{{end}}{{if .Faulty}} deadline-faulty{{end}}"></div>
{{end}}
</div>
</td>
</tr>
{{end}}
{{end}}

View File

@ -0,0 +1,15 @@
{{define "chain_rpcs"}}
{{range .}}
<tr>
<td>{{.Address}}</td>
<td>
{{range .CLayers}}
<span>{{.}} </span>
{{end}}
</td>
<td>{{if .Reachable}}<span class="success">ok</span>{{else}}<span class="error">FAIL</span>{{end}}</td>
<td>{{if eq "ok" .SyncState}}<span class="success">ok</span>{{else}}<span class="warning">{{.SyncState}}</span>{{end}}</td>
<td>{{.Version}}</td>
</tr>
{{end}}
{{end}}

View File

@ -0,0 +1,10 @@
{{define "cluster_machines"}}
{{range .}}
<tr>
<td>{{.Address}}</td>
<td>{{.ID}}</td>
<td>todo</td>
<td>{{.SinceContact}}</td>
</tr>
{{end}}
{{end}}

View File

@ -0,0 +1,14 @@
{{define "cluster_task_history"}}
{{range .}}
<tr>
<td>{{.Name}}</td>
<td>{{.TaskID}}</td>
<td>{{.CompletedBy}}</td>
<td>{{.Posted}}</td>
<td>{{.Start}}</td>
<td>{{.End}}</td>
<td>{{if .Result}}<span class="success">success</span>{{else}}<span class="error">error</span>{{end}}</td>
<td>{{.Err}}</td>
</tr>
{{end}}
{{end}}

View File

@ -0,0 +1,10 @@
{{define "cluster_tasks"}}
{{range .}}
<tr>
<td>{{.Name}}</td>
<td>{{.ID}}</td>
<td>{{.SincePosted}}</td>
<td>{{.Owner}}</td>
</tr>
{{end}}
{{end}}

View File

@ -0,0 +1,84 @@
// Package web defines the HTTP web server for static files and endpoints.
package web
import (
"context"
"embed"
"io"
"io/fs"
"net"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/gorilla/mux"
"go.opencensus.io/tag"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/cmd/lotus-provider/web/api"
"github.com/filecoin-project/lotus/cmd/lotus-provider/web/hapi"
"github.com/filecoin-project/lotus/metrics"
)
//go:embed static
var static embed.FS
var basePath = "/static/"
// A dev-mode hack allowing no-restart changes to static files and templates.
// You still need to recompile the binary for changes to Go code.
var webDev = os.Getenv("LOTUS_WEB_DEV") == "1"
func GetSrv(ctx context.Context, deps *deps.Deps) (*http.Server, error) {
mx := mux.NewRouter()
err := hapi.Routes(mx.PathPrefix("/hapi").Subrouter(), deps)
if err != nil {
return nil, err
}
api.Routes(mx.PathPrefix("/api").Subrouter(), deps)
basePath := basePath
var static fs.FS = static
if webDev {
basePath = "cmd/lotus-provider/web/static"
static = os.DirFS(basePath)
}
mx.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// If the request is for a directory, redirect to the index file.
if strings.HasSuffix(r.URL.Path, "/") {
r.URL.Path += "index.html"
}
file, err := static.Open(path.Join(basePath, r.URL.Path)[1:])
if err != nil {
w.WriteHeader(http.StatusNotFound)
_, _ = w.Write([]byte("404 Not Found"))
return
}
defer func() { _ = file.Close() }()
fileInfo, err := file.Stat()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte("500 Internal Server Error"))
return
}
http.ServeContent(w, r, fileInfo.Name(), fileInfo.ModTime(), file.(io.ReadSeeker))
})
return &http.Server{
Handler: http.HandlerFunc(mx.ServeHTTP),
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-provider"))
return ctx
},
Addr: deps.Cfg.Subsystems.GuiAddress,
ReadTimeout: time.Minute * 3,
ReadHeaderTimeout: time.Minute * 3, // lint
}, nil
}

View File

@ -0,0 +1,73 @@
import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js';
window.customElements.define('chain-connectivity', class MyElement extends LitElement {
constructor() {
super();
this.data = [];
this.loadData();
}
loadData() {
const eventSource = new EventSource('/api/debug/chain-state-sse');
eventSource.onmessage = (event) => {
this.data = JSON.parse(event.data);
super.requestUpdate();
};
eventSource.onerror = (error) => {
console.error('Error:', error);
loadData();
};
};
static get styles() {
return [css`
:host {
box-sizing: border-box; /* Don't forget this; it includes padding/border in the width calculation */
}
table {
border-collapse: collapse;
}
table td, table th {
border-left: 1px solid #f0f0f0;
padding: 1px 5px;
}
table tr td:first-child, table tr th:first-child {
border-left: none;
}
.success {
color: green;
}
.warning {
color: yellow;
}
.error {
color: red;
}
`];
}
render = () => html`
<table>
<thead>
<tr>
<th>RPC Address</th>
<th>Reachability</th>
<th>Sync Status</th>
<th>Version</th>
</tr>
</thead>
<tbody>
${this.data.map(item => html`
<tr>
<td>${item.Address}</td>
<td>${item.Reachable ? html`<span class="success">ok</span>` : html`<span class="error">FAIL</span>`}</td>
<td>${item.SyncState === "ok" ? html`<span class="success">ok</span>` : html`<span class="warning">${item.SyncState}</span>`}</td>
<td>${item.Version}</td>
</tr>
`)}
<tr>
<td colspan="4">Data incoming...</td>
</tr>
</tbody>
</table>`
});

View File

@ -0,0 +1,193 @@
<html>
<head>
<title>Lotus Provider Cluster Overview</title>
<script src="https://unpkg.com/htmx.org@1.9.5" integrity="sha384-xcuj3WpfgjlKF+FXhSQFQ0ZNr39ln+hwjN3npfM9VBnUskLolQAcN80McRIVOPuO" crossorigin="anonymous"></script>
<script type="module" src="chain-connectivity.js"></script>
<style>
html, body {
background: #0f0f0f;
color: #ffffff;
padding: 0;
margin: 0;
font-family: monospace;
}
table td, table th {
font-size: 13px;
}
.app-head {
width: 100%;
}
.head-left {
display: inline-block;
}
.head-right {
display: inline-block;
float: right;
}
table {
border-collapse: collapse;
}
table td, table th {
border-left: 1px solid #f0f0f0;
padding: 1px 5px;
}
table tr td:first-child, table tr th:first-child {
border-left: none;
}
a:link {
color: #cfc;
}
a:visited {
color: #dfa;
}
a:hover {
color: #af7;
}
.success {
color: green;
}
.warning {
color: yellow;
}
.error {
color: red;
}
.dash-tile {
display: flex;
flex-direction: column;
padding: 0.75rem;
background: #3f3f3f;
& b {
padding-bottom: 0.5rem;
color: deeppink;
}
}
.deadline-box {
display: grid;
grid-template-columns: repeat(16, auto);
grid-template-rows: repeat(3, auto);
grid-gap: 1px;
}
.deadline-entry {
width: 10px;
height: 10px;
background-color: grey;
margin: 1px;
}
.deadline-entry-cur {
border-bottom: 3px solid deepskyblue;
height: 7px;
}
.deadline-proven {
background-color: green;
}
.deadline-partially-faulty {
background-color: yellow;
}
.deadline-faulty {
background-color: red;
}
</style>
</head>
<body>
<div class="app-head">
<div class="head-left">
<h1>Lotus Provider Cluster</h1>
</div>
<div class="head-right">
version [todo]
</div>
</div>
<hr/>
<div class="page">
<div class="info-block">
<h2>Chain Connectivity</h2>
<chain-connectivity></chain-connectivity>
</div>
<hr>
<div class="info-block">
<h2>Actor Summary</h2>
<table>
<thead>
<tr>
<th>Address</th>
<th>Config Layers</th>
<th>QaP</th>
<th>Deadlines</th>
</tr>
</thead>
<tbody hx-get="/hapi/simpleinfo/actorsummary" hx-trigger="load,every 5s">
</tbody>
</table>
</div>
<hr>
<div class="info-block">
<h2>Cluster Machines</h2>
<table>
<thead>
<tr>
<th>Host</th>
<th>ID</th>
<th>Config Layers</th>
<th>Last Contact</th>
</tr>
</thead>
<tbody hx-get="/hapi/simpleinfo/machines" hx-trigger="load,every 5s">
</tbody>
</table>
</div>
<hr>
<div class="info-block">
<h2>Recently Finished Tasks</h2>
<table>
<thead>
<tr>
<th>Name</th>
<th>ID</th>
<th>Executor</th>
<th>Posted</th>
<th>Start</th>
<th>End</th>
<th>Outcome</th>
<th>Message</th>
</tr>
</thead>
<tbody hx-get="/hapi/simpleinfo/taskhistory" hx-trigger="load, every 5s">
</tbody>
</table>
</div>
<hr>
<div class="info-block">
<h2>Cluster Tasks</h2>
<table>
<thead>
<tr>
<th>Task</th>
<th>ID</th>
<th>Posted</th>
<th>Owner</th>
</tr>
</thead>
<tbody hx-get="/hapi/simpleinfo/tasks" hx-trigger="load,every 5s">
</tbody>
</table>
</div>
</div>
</body>
</html>

View File

@ -3,7 +3,8 @@ package main
import (
"context"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"time"
@ -21,6 +22,8 @@ import (
v9 "github.com/filecoin-project/go-state-types/builtin/v9"
"github.com/filecoin-project/lotus/blockstore"
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/blockstore/splitstore"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
@ -73,24 +76,52 @@ var invariantsCmd = &cli.Command{
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
cold, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
return fmt.Errorf("failed to open universal blockstore %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
path, err := lkrepo.SplitstorePath()
if err != nil {
return err
}
path = filepath.Join(path, "hot.badger")
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
opts, err := repo.BadgerBlockstoreOptions(repo.HotBlockstore, path, lkrepo.Readonly())
if err != nil {
return err
}
hot, err := badgerbs.Open(opts)
if err != nil {
return err
}
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
cfg := &splitstore.Config{
MarkSetType: "map",
DiscardColdBlocks: true,
}
ss, err := splitstore.Open(path, mds, hot, cold, cfg)
if err != nil {
return err
}
defer func() {
if err := ss.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}()
bs := ss
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck

View File

@ -553,7 +553,7 @@ var sendInvalidWindowPoStCmd = &cli.Command{
return xerrors.Errorf("serializing params: %w", err)
}
fmt.Printf("submitting bad PoST for %d paritions\n", len(partitionIndices))
fmt.Printf("submitting bad PoST for %d partitions\n", len(partitionIndices))
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: minfo.Worker,
To: maddr,

View File

@ -157,7 +157,8 @@ var terminationsCmd = &cli.Command{
}
for _, t := range termParams.Terminations {
sectors, err := minerSt.LoadSectors(&t.Sectors)
tmp := t.Sectors
sectors, err := minerSt.LoadSectors(&tmp)
if err != nil {
return err
}

View File

@ -166,7 +166,8 @@ func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.Block
)
}()
for _, actor := range targets {
for _, actorTmp := range targets {
actor := actorTmp
switch {
case builtin.IsAccountActor(actor.Code):
if _, err := bb.PushMessage(&types.Message{

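Both diffs above apply the same fix for a classic Go pitfall: before Go 1.22, a `for ... range` loop reuses a single loop variable, so retaining its address (or a pointer into it, as `LoadSectors(&t.Sectors)` did) or capturing it across iterations can silently alias everything to the final element. A minimal sketch of the bug and the copy-first pattern used here (names are illustrative):

```go
package main

import "fmt"

func main() {
	vals := []int{1, 2, 3}
	var ptrs []*int

	// Buggy on Go <1.22: v is a single variable reused each iteration,
	// so every stored pointer refers to the same address.
	for _, v := range vals {
		ptrs = append(ptrs, &v)
	}
	fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // may print: 3 3 3

	// The fix mirrored by the diffs above: copy into a fresh variable.
	ptrs = ptrs[:0]
	for _, vTmp := range vals {
		v := vTmp // fresh variable per iteration
		ptrs = append(ptrs, &v)
	}
	fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // prints: 1 2 3
}
```

Note that go.mod in this PR still declares `go 1.20`, so the pre-1.22 semantics apply.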
View File

@ -39,6 +39,7 @@ import (
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@ -284,7 +285,36 @@ var runCmd = &cli.Command{
Value: true,
DefaultText: "inherits --addpiece",
},
&cli.StringFlag{
Name: "external-pc2",
Usage: "command for computing PC2 externally",
},
},
Description: `Run lotus-worker.
--external-pc2 can be used to compute the PreCommit2 inputs externally.
The flag behaves similarly to the related lotus-worker flag; using it in
lotus-bench may be useful for testing whether the external PreCommit2 command is
invoked correctly.
The command will be called with a number of environment variables set:
* EXTSEAL_PC2_SECTOR_NUM: the sector number
* EXTSEAL_PC2_SECTOR_MINER: the miner id
* EXTSEAL_PC2_PROOF_TYPE: the proof type
* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes
* EXTSEAL_PC2_CACHE: the path to the cache directory
* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller)
* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json)
The command is expected to:
* Create cache sc-02-data-tree-r* files
* Create cache sc-02-data-tree-c* files
* Create cache p_aux / t_aux files
* Transform the sealed file in place
Example invocation of lotus-bench as external executor:
'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT'
`,
Before: func(cctx *cli.Context) error {
if cctx.IsSet("address") {
log.Warnf("The '--address' flag is deprecated, it has been replaced by '--listen'")
@ -623,18 +653,32 @@ var runCmd = &cli.Command{
fh.ServeHTTP(w, r)
}
// Parse ffi executor flags
var ffiOpts []ffiwrapper.FFIWrapperOpt
if cctx.IsSet("external-pc2") {
extSeal := ffiwrapper.ExternalSealer{
PreCommit2: ffiwrapper.MakeExternPrecommit2(cctx.String("external-pc2")),
}
ffiOpts = append(ffiOpts, ffiwrapper.WithExternalSealCalls(extSeal))
}
// Create / expose the worker
wsts := statestore.New(namespace.Wrap(ds, modules.WorkerCallsPrefix))
workerApi := &sealworker.Worker{
LocalWorker: sealer.NewLocalWorker(sealer.WorkerConfig{
TaskTypes: taskTypes,
NoSwap: cctx.Bool("no-swap"),
MaxParallelChallengeReads: cctx.Int("post-parallel-reads"),
ChallengeReadTimeout: cctx.Duration("post-read-timeout"),
Name: cctx.String("name"),
}, remote, localStore, nodeApi, nodeApi, wsts),
LocalWorker: sealer.NewLocalWorkerWithExecutor(
sealer.FFIExec(ffiOpts...),
sealer.WorkerConfig{
TaskTypes: taskTypes,
NoSwap: cctx.Bool("no-swap"),
MaxParallelChallengeReads: cctx.Int("post-parallel-reads"),
ChallengeReadTimeout: cctx.Duration("post-read-timeout"),
Name: cctx.String("name"),
}, os.LookupEnv, remote, localStore, nodeApi, nodeApi, wsts),
LocalStore: localStore,
Storage: lr,
}

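The description block added above defines a small contract for the external PC2 executor. As an illustration (not shipped code), the `--external-pc2` target could be as simple as a wrapper that forwards the documented `EXTSEAL_PC2_*` variables to lotus-bench, which implements the expected behavior:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// Sketch of an --external-pc2 target. It receives the EXTSEAL_PC2_*
// environment variables documented above and must produce the
// tree-r/tree-c/p_aux/t_aux cache files while transforming the sealed
// file in place; here we simply delegate to lotus-bench.
func main() {
	cmd := exec.Command("./lotus-bench", "simple", "precommit2",
		"--sector-size", os.Getenv("EXTSEAL_PC2_SECTOR_SIZE"),
		os.Getenv("EXTSEAL_PC2_SEALED"),
		os.Getenv("EXTSEAL_PC2_CACHE"),
		os.Getenv("EXTSEAL_PC2_PC1OUT"),
	)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "external PC2 failed:", err)
		os.Exit(1)
	}
}
```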
View File

@ -26,7 +26,11 @@ import (
var log = logging.Logger("sealworker")
func WorkerHandler(authv func(ctx context.Context, token string) ([]auth.Permission, error), remote http.HandlerFunc, a api.Worker, permissioned bool) http.Handler {
func WorkerHandler(
authv func(ctx context.Context, token string) ([]auth.Permission, error),
remote http.HandlerFunc,
a api.Worker,
permissioned bool) http.Handler {
mux := mux.NewRouter()
readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt)

View File

@ -269,6 +269,26 @@ var DaemonCmd = &cli.Command{
}
}
if cctx.Bool("remove-existing-chain") {
lr, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return xerrors.Errorf("error opening fs repo: %w", err)
}
exists, err := lr.Exists()
if err != nil {
return err
}
if !exists {
return xerrors.Errorf("lotus repo doesn't exist")
}
err = removeExistingChain(cctx, lr)
if err != nil {
return err
}
}
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
willImportChain := false

View File

@ -0,0 +1,25 @@
# Groups
* [](#)
* [Shutdown](#Shutdown)
* [Version](#Version)
##
### Shutdown
Perms: admin
Inputs: `null`
Response: `{}`
### Version
Perms: admin
Inputs: `null`
Response: `131840`
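Both methods follow the usual lotus JSON-RPC shape. A hedged sketch of calling `Version` by hand; the listen address, the `/rpc/v0` path, the `Filecoin` method namespace, and the `PROVIDER_API_TOKEN` variable are assumptions for illustration, not documented values:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	body := []byte(`{"jsonrpc":"2.0","method":"Filecoin.Version","params":null,"id":1}`)
	req, err := http.NewRequest("POST", "http://127.0.0.1:12300/rpc/v0", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Both methods are admin-only, so an admin token must accompany the call.
	req.Header.Set("Authorization", "Bearer "+os.Getenv("PROVIDER_API_TOKEN"))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. {"jsonrpc":"2.0","result":131840,"id":1}
}
```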

View File

@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
1.25.1-dev
1.25.3-dev
COMMANDS:
init Initialize a lotus miner repo
@ -66,6 +66,7 @@ OPTIONS:
--no-local-storage don't use storageminer repo for sector storage (default: false)
--gas-premium value set gas premium for initialization messages in AttoFIL (default: "0")
--from value select which address to send actor creation message from
--confidence value number of block confirmations to wait for (default: 5)
--help, -h show help
```
@ -231,8 +232,19 @@ OPTIONS:
--help, -h show help
```
#### lotus-miner actor set-addresses, set-addrs
### lotus-miner actor set-addresses
```
NAME:
lotus-miner actor set-addresses - set addresses that your miner can be publicly dialed on
USAGE:
lotus-miner actor set-addresses [command options] <multiaddrs>
OPTIONS:
--from value optionally specify the account to send the message from
--gas-limit value set gas limit (default: 0)
--unset unset address (default: false)
--help, -h show help
```
### lotus-miner actor withdraw
@ -1161,8 +1173,20 @@ OPTIONS:
--help, -h show help
```
##### lotus-miner proving compute windowed-post, window-post
#### lotus-miner proving compute windowed-post
```
NAME:
lotus-miner proving compute windowed-post - Compute WindowPoSt for a specific deadline
USAGE:
lotus-miner proving compute windowed-post [command options] [deadline index]
DESCRIPTION:
Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain.
OPTIONS:
--help, -h show help
```
### lotus-miner proving recover-faults

View File

@ -0,0 +1,430 @@
# lotus-provider
```
NAME:
lotus-provider - Filecoin decentralized storage network provider
USAGE:
lotus-provider [global options] command [command options] [arguments...]
VERSION:
1.25.3-dev
COMMANDS:
run Start a lotus provider process
stop Stop a running lotus provider
config Manage node config by layers. The layer 'base' will always be applied.
test Utility functions for testing
web Start lotus provider web interface
version Print version
help, h Shows a list of commands or help for one command
DEVELOPER:
auth Manage RPC permissions
log Manage logging
wait-api Wait for lotus api to come online
fetch-params Fetch proving parameters
GLOBAL OPTIONS:
--color use color in display output (default: depends on output being a TTY)
--db-host value Comma-separated list of hostnames for yugabyte cluster (default: "yugabyte") [$LOTUS_DB_HOST]
--db-name value (default: "yugabyte") [$LOTUS_DB_NAME, $LOTUS_HARMONYDB_HOSTS]
--db-user value (default: "yugabyte") [$LOTUS_DB_USER, $LOTUS_HARMONYDB_USERNAME]
--db-password value (default: "yugabyte") [$LOTUS_DB_PASSWORD, $LOTUS_HARMONYDB_PASSWORD]
--layers value (default: "base") [$LOTUS_LAYERS, $LOTUS_CONFIG_LAYERS]
--repo-path value (default: "~/.lotusprovider") [$LOTUS_REPO_PATH]
--vv enables very verbose mode, useful for debugging the CLI (default: false)
--help, -h show help
--version, -v print the version
```
## lotus-provider run
```
NAME:
lotus-provider run - Start a lotus provider process
USAGE:
lotus-provider run [command options] [arguments...]
OPTIONS:
--listen value host address and port the worker api will listen on (default: "0.0.0.0:12300") [$LOTUS_WORKER_LISTEN]
--nosync don't check full-node sync status (default: false)
--manage-fdlimit manage open file limit (default: true)
--layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base")
--storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json")
--journal value path to journal files (default: "~/.lotus-provider/")
--help, -h show help
```
## lotus-provider stop
```
NAME:
lotus-provider stop - Stop a running lotus provider
USAGE:
lotus-provider stop [command options] [arguments...]
OPTIONS:
--help, -h show help
```
## lotus-provider config
```
NAME:
lotus-provider config - Manage node config by layers. The layer 'base' will always be applied.
USAGE:
lotus-provider config command [command options] [arguments...]
COMMANDS:
default, defaults Print default node config
set, add, update, create Set a config layer or the base by providing a filename or stdin.
get, cat, show Get a config layer by name. You may want to pipe the output to a file, or use 'less'
list, ls List config layers you can get.
interpret, view, stacked, stack Interpret stacked config layers by this version of lotus-provider, with system-generated comments.
remove, rm, del, delete Remove a named config layer.
from-miner Express a database config (for lotus-provider) from an existing miner.
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help
```
### lotus-provider config default
```
NAME:
lotus-provider config default - Print default node config
USAGE:
lotus-provider config default [command options] [arguments...]
OPTIONS:
--no-comment don't comment default values (default: false)
--help, -h show help
```
### lotus-provider config set
```
NAME:
lotus-provider config set - Set a config layer or the base by providing a filename or stdin.
USAGE:
lotus-provider config set [command options] a layer's file name
OPTIONS:
--title value title of the config layer (req'd for stdin)
--help, -h show help
```
### lotus-provider config get
```
NAME:
lotus-provider config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less'
USAGE:
lotus-provider config get [command options] layer name
OPTIONS:
--help, -h show help
```
### lotus-provider config list
```
NAME:
lotus-provider config list - List config layers you can get.
USAGE:
lotus-provider config list [command options] [arguments...]
OPTIONS:
--help, -h show help
```
### lotus-provider config interpret
```
NAME:
lotus-provider config interpret - Interpret stacked config layers by this version of lotus-provider, with system-generated comments.
USAGE:
lotus-provider config interpret [command options] a list of layers to be interpreted as the final config
OPTIONS:
--layers value [ --layers value ] comma or space separated list of layers to be interpreted (default: "base")
--help, -h show help
```
### lotus-provider config remove
```
NAME:
lotus-provider config remove - Remove a named config layer.
USAGE:
lotus-provider config remove [command options] [arguments...]
OPTIONS:
--help, -h show help
```
### lotus-provider config from-miner
```
NAME:
lotus-provider config from-miner - Express a database config (for lotus-provider) from an existing miner.
USAGE:
lotus-provider config from-miner [command options] [arguments...]
DESCRIPTION:
Express a database config (for lotus-provider) from an existing miner.
OPTIONS:
--miner-repo value, --storagerepo value Specify miner repo path. flag(storagerepo) and env(LOTUS_STORAGE_PATH) are DEPRECATED and will be REMOVED SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
--to-layer value, -t value The layer name for this data push. 'base' is recommended for single-miner setup.
--overwrite, -o Use this with --to-layer to replace an existing layer (default: false)
--help, -h show help
```
## lotus-provider test
```
NAME:
lotus-provider test - Utility functions for testing
USAGE:
lotus-provider test command [command options] [arguments...]
COMMANDS:
window-post, wd, windowpost, wdpost Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These proofs will not be sent to the chain.
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help
```
### lotus-provider test window-post
```
NAME:
lotus-provider test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These proofs will not be sent to the chain.
USAGE:
lotus-provider test window-post command [command options] [arguments...]
COMMANDS:
here, cli Compute WindowPoSt for performance and configuration testing.
task, scheduled, schedule, async, asynchronous Test the windowpost scheduler by running it on the next available lotus-provider.
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help
```
#### lotus-provider test window-post here
```
NAME:
lotus-provider test window-post here - Compute WindowPoSt for performance and configuration testing.
USAGE:
lotus-provider test window-post here [command options] [deadline index]
DESCRIPTION:
Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.
OPTIONS:
--deadline value deadline to compute WindowPoSt for (default: 0)
--layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base")
--storage-json value path to json file containing storage config (default: "~/.lotus-provider/storage.json")
--partition value partition to compute WindowPoSt for (default: 0)
--help, -h show help
```
#### lotus-provider test window-post task
```
NAME:
lotus-provider test window-post task - Test the windowpost scheduler by running it on the next available lotus-provider.
USAGE:
lotus-provider test window-post task [command options] [arguments...]
OPTIONS:
--deadline value deadline to compute WindowPoSt for (default: 0)
--layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base (default: "base")
--help, -h show help
```
## lotus-provider web
```
NAME:
lotus-provider web - Start lotus provider web interface
USAGE:
lotus-provider web [command options] [arguments...]
DESCRIPTION:
Start an instance of lotus provider web interface.
This creates the 'web' layer if it does not exist, then calls run with that layer.
OPTIONS:
--listen value Address to listen on (default: "127.0.0.1:4701")
--layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base. Web will be added (default: "base")
--nosync don't check full-node sync status (default: false)
--help, -h show help
```
## lotus-provider version
```
NAME:
lotus-provider version - Print version
USAGE:
lotus-provider version [command options] [arguments...]
OPTIONS:
--help, -h show help
```
## lotus-provider auth
```
NAME:
lotus-provider auth - Manage RPC permissions
USAGE:
lotus-provider auth command [command options] [arguments...]
COMMANDS:
create-token Create token
api-info Get token with API info required to connect to this node
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help
```
### lotus-provider auth create-token
```
NAME:
lotus-provider auth create-token - Create token
USAGE:
lotus-provider auth create-token [command options] [arguments...]
OPTIONS:
--perm value permission to assign to the token, one of: read, write, sign, admin
--help, -h show help
```
### lotus-provider auth api-info
```
NAME:
lotus-provider auth api-info - Get token with API info required to connect to this node
USAGE:
lotus-provider auth api-info [command options] [arguments...]
OPTIONS:
--perm value permission to assign to the token, one of: read, write, sign, admin
--help, -h show help
```
## lotus-provider log
```
NAME:
lotus-provider log - Manage logging
USAGE:
lotus-provider log command [command options] [arguments...]
COMMANDS:
list List log systems
set-level Set log level
alerts Get alert states
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help
```
### lotus-provider log list
```
NAME:
lotus-provider log list - List log systems
USAGE:
lotus-provider log list [command options] [arguments...]
OPTIONS:
--help, -h show help
```
### lotus-provider log set-level
```
NAME:
lotus-provider log set-level - Set log level
USAGE:
lotus-provider log set-level [command options] [level]
DESCRIPTION:
Set the log level for logging systems:
The system flag can be specified multiple times.
eg) log set-level --system chain --system chainxchg debug
Available Levels:
debug
info
warn
error
Environment Variables:
GOLOG_LOG_LEVEL - Default log level for all log systems
GOLOG_LOG_FMT - Change output log format (json, nocolor)
GOLOG_FILE - Write logs to file
GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr
OPTIONS:
--system value [ --system value ] limit to log system
--help, -h show help
```
### lotus-provider log alerts
```
NAME:
lotus-provider log alerts - Get alert states
USAGE:
lotus-provider log alerts [command options] [arguments...]
OPTIONS:
--all get all (active and inactive) alerts (default: false)
--help, -h show help
```
## lotus-provider wait-api
```
NAME:
lotus-provider wait-api - Wait for lotus api to come online
USAGE:
lotus-provider wait-api [command options] [arguments...]
CATEGORY:
DEVELOPER
OPTIONS:
--timeout value duration to wait till fail (default: 30s)
--help, -h show help
```
## lotus-provider fetch-params
```
NAME:
lotus-provider fetch-params - Fetch proving parameters
USAGE:
lotus-provider fetch-params [command options] [sectorSize]
CATEGORY:
DEVELOPER
OPTIONS:
--help, -h show help
```

View File

@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
1.25.1-dev
1.25.3-dev
COMMANDS:
run Start lotus worker
@ -34,6 +34,33 @@ NAME:
USAGE:
lotus-worker run [command options] [arguments...]
DESCRIPTION:
Run lotus-worker.
--external-pc2 can be used to compute the PreCommit2 inputs externally.
The flag behaves similarly to the related lotus-worker flag; using it in
lotus-bench may be useful for testing whether the external PreCommit2 command is
invoked correctly.
The command will be called with a number of environment variables set:
* EXTSEAL_PC2_SECTOR_NUM: the sector number
* EXTSEAL_PC2_SECTOR_MINER: the miner id
* EXTSEAL_PC2_PROOF_TYPE: the proof type
* EXTSEAL_PC2_SECTOR_SIZE: the sector size in bytes
* EXTSEAL_PC2_CACHE: the path to the cache directory
* EXTSEAL_PC2_SEALED: the path to the sealed sector file (initialized with unsealed data by the caller)
* EXTSEAL_PC2_PC1OUT: output from rust-fil-proofs precommit1 phase (base64 encoded json)
The command is expected to:
* Create cache sc-02-data-tree-r* files
* Create cache sc-02-data-tree-c* files
* Create cache p_aux / t_aux files
* Transform the sealed file in place
Example invocation of lotus-bench as external executor:
'./lotus-bench simple precommit2 --sector-size $EXTSEAL_PC2_SECTOR_SIZE $EXTSEAL_PC2_SEALED $EXTSEAL_PC2_CACHE $EXTSEAL_PC2_PC1OUT'
OPTIONS:
--listen value host address and port the worker api will listen on (default: "0.0.0.0:3456") [$LOTUS_WORKER_LISTEN]
--no-local-storage don't use storageminer repo for sector storage (default: false) [$LOTUS_WORKER_NO_LOCAL_STORAGE]
@ -57,6 +84,7 @@ OPTIONS:
--timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") [$LOTUS_WORKER_TIMEOUT]
--http-server-timeout value (default: "30s")
--data-cid Run the data-cid task. true|false (default: inherits --addpiece)
--external-pc2 value command for computing PC2 externally
--help, -h show help
```

View File

@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
1.25.1-dev
1.25.3-dev
COMMANDS:
daemon Start a lotus daemon process
@ -1807,8 +1807,16 @@ OPTIONS:
--help, -h show help
```
#### lotus state sector, sector-info
### lotus state sector
```
NAME:
lotus state sector - Get miner sector info
USAGE:
lotus state sector [command options] [minerAddress] [sectorNumber]
OPTIONS:
--help, -h show help
```
### lotus state get-actor
@ -1937,12 +1945,29 @@ OPTIONS:
--help, -h show help
```
#### lotus state wait-msg, wait-message
### lotus state wait-msg
```
NAME:
lotus state wait-msg - Wait for a message to appear on chain
USAGE:
lotus state wait-msg [command options] [messageCid]
OPTIONS:
--timeout value (default: "10m")
--help, -h show help
```
#### lotus state search-msg, search-message
### lotus state search-msg
```
NAME:
lotus state search-msg - Search to see whether a message has appeared on chain
USAGE:
lotus state search-msg [command options] [messageCid]
OPTIONS:
--help, -h show help
```
### lotus state miner-info
@ -2080,8 +2105,17 @@ OPTIONS:
--help, -h show help
```
#### lotus chain get-block, getblock
### lotus chain get-block
```
NAME:
lotus chain get-block - Get a block and print its details
USAGE:
lotus chain get-block [command options] [blockCid]
OPTIONS:
--raw print just the raw block header (default: false)
--help, -h show help
```
### lotus chain read-obj
@ -2132,16 +2166,46 @@ OPTIONS:
--help, -h show help
```
##### lotus chain getmessage, get-message, get-msg
### lotus chain getmessage
```
NAME:
lotus chain getmessage - Get and print a message by its cid
USAGE:
lotus chain getmessage [command options] [messageCid]
OPTIONS:
--help, -h show help
```
#### lotus chain sethead, set-head
### lotus chain sethead
```
NAME:
lotus chain sethead - manually set the local node's head tipset (Caution: normally only used for recovery)
USAGE:
lotus chain sethead [command options] [tipsetkey]
OPTIONS:
--genesis reset head to genesis (default: false)
--epoch value reset head to given epoch (default: 0)
--help, -h show help
```
#### lotus chain list, love
### lotus chain list
```
NAME:
lotus chain list - View a segment of the chain
USAGE:
lotus chain list [command options] [arguments...]
OPTIONS:
--height value (default: current head)
--count value (default: 30)
--format value specify the format to print out tipsets (default: "<height>: (<time>) <blocks>")
--gas-stats view gas statistics for the chain (default: false)
--help, -h show help
```
### lotus chain get
@ -2768,8 +2832,16 @@ OPTIONS:
--help, -h show help
```
#### lotus net find-peer, findpeer
### lotus net find-peer
```
NAME:
lotus net find-peer - Find the addresses of a given peerID
USAGE:
lotus net find-peer [command options] [peerId]
OPTIONS:
--help, -h show help
```
### lotus net scores

View File

@ -145,6 +145,14 @@
# env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS
#EnableMarkets = false
# When enabled, the sector index will reside in an external database
# as opposed to the local KV store in the miner process
# This is useful to allow workers to bypass the lotus miner to access sector information
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_ENABLESECTORINDEXDB
#EnableSectorIndexDB = false
# type: string
# env var: LOTUS_SUBSYSTEMS_SEALERAPIINFO
#SealerApiInfo = ""
@ -153,6 +161,31 @@
# env var: LOTUS_SUBSYSTEMS_SECTORINDEXAPIINFO
#SectorIndexApiInfo = ""
# When window post is enabled, the miner will automatically submit window post proofs
# for all sectors that are eligible for window post
# IF WINDOW POST IS DISABLED, THE MINER WILL NOT SUBMIT WINDOW POST PROOFS
# THIS WILL RESULT IN FAULTS AND PENALTIES IF NO OTHER MECHANISM IS RUNNING
# TO SUBMIT WINDOW POST PROOFS.
# Note: This option entirely disables the window post scheduler,
# not just the builtin PoSt computation like Proving.DisableBuiltinWindowPoSt.
# This option will stop lotus-miner from performing any actions related
# to window post, including scheduling, submitting proofs, and recovering
# sectors.
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_DISABLEWINDOWPOST
#DisableWindowPoSt = false
# When winning post is disabled, the miner process will NOT attempt to mine
# blocks. This should only be set when there's an external process mining
# blocks on behalf of the miner.
# When disabled and no external block producers are configured, all potential
# block rewards will be missed!
#
# type: bool
# env var: LOTUS_SUBSYSTEMS_DISABLEWINNINGPOST
#DisableWinningPoSt = false
[Dealmaking]
# When enabled, the miner can accept online deals
@ -896,3 +929,36 @@
#GCInterval = "1m0s"
[HarmonyDB]
# HOSTS is a list of hostnames of nodes running YugabyteDB
# in a cluster. Only one is required
#
# type: []string
# env var: LOTUS_HARMONYDB_HOSTS
#Hosts = ["127.0.0.1"]
# The Yugabyte server's username with full credentials to operate on Lotus' Database. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_USERNAME
#Username = "yugabyte"
# The password for the related username. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_PASSWORD
#Password = "yugabyte"
# The database (logical partition) within Yugabyte. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_DATABASE
#Database = "yugabyte"
# The port to find Yugabyte. Blank for default.
#
# type: string
# env var: LOTUS_HARMONYDB_PORT
#Port = "5433"
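Since YugabyteDB speaks the PostgreSQL wire protocol (this PR pulls in `jackc/pgx/v5` for exactly that), the settings above map directly onto a standard connection string. A minimal connectivity check, assuming the defaults shown:

```go
package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	// Mirrors the defaults above: Hosts[0], Username, Password, Port, Database.
	url := "postgres://yugabyte:yugabyte@127.0.0.1:5433/yugabyte"
	pool, err := pgxpool.New(context.Background(), url)
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	var one int
	if err := pool.QueryRow(context.Background(), "SELECT 1").Scan(&one); err != nil {
		panic(err)
	}
	fmt.Println("connected, SELECT 1 =", one)
}
```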

View File

@ -0,0 +1,217 @@
[Subsystems]
# type: bool
#EnableWindowPost = false
# type: int
#WindowPostMaxTasks = 0
# type: bool
#EnableWinningPost = false
# type: int
#WinningPostMaxTasks = 0
# type: bool
#EnableWebGui = false
# The address that should listen for Web GUI requests.
#
# type: string
#GuiAddress = ":4701"
[Fees]
# type: types.FIL
#DefaultMaxFee = "0.07 FIL"
# type: types.FIL
#MaxPreCommitGasFee = "0.025 FIL"
# type: types.FIL
#MaxCommitGasFee = "0.05 FIL"
# type: types.FIL
#MaxTerminateGasFee = "0.5 FIL"
# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
#MaxWindowPoStGasFee = "5 FIL"
# type: types.FIL
#MaxPublishDealsFee = "0.05 FIL"
[Fees.MaxPreCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.02 FIL"
[Fees.MaxCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.03 FIL"
[Addresses]
# Addresses to send PreCommit messages from
#
# type: []string
#PreCommitControl = []
# Addresses to send Commit messages from
#
# type: []string
#CommitControl = []
# type: []string
#TerminateControl = []
# DisableOwnerFallback disables usage of the owner address for messages
# sent automatically
#
# type: bool
#DisableOwnerFallback = false
# DisableWorkerFallback disables usage of the worker address for messages
# sent automatically, if control addresses are configured.
# A control address that doesn't have enough funds will still be chosen
# over the worker address if this flag is set.
#
# type: bool
#DisableWorkerFallback = false
[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
#ParallelCheckLimit = 32
# Maximum amount of time a proving pre-check can take for a sector. If the check times out, the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
#SingleCheckTimeout = "10m0s"
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
#PartitionCheckTimeout = "20m0s"
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableBuiltinWindowPoSt = false
# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
#DisableBuiltinWinningPoSt = false
# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
# the builtin logic not skipping snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
# sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableWDPoStPreChecks = false
# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
#MaxPartitionsPerPoStMessage = 0
# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (eg 1);
# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have lower gas limit)
#
# type: int
#MaxPartitionsPerRecoveryMessage = 0
# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
# with recovering sectors in the post message
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# type: bool
#SingleRecoveringPartitionPerPostMessage = false
[Journal]
# Events of the form: "system1:event1,system1:event2[,...]"
#
# type: string
#DisabledEvents = ""
[Apis]
# RPC Secret for the storage subsystem.
# If integrating with lotus-miner this must match the value from
# cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey
#
# type: string
#StorageRPCSecret = ""

View File

@ -92,7 +92,7 @@ func main() {
err = gen.WriteTupleEncodersToFile("./chain/exchange/cbor_gen.go", "exchange",
exchange.Request{},
exchange.Response{},
exchange.CompactedMessages{},
exchange.CompactedMessagesCBOR{},
exchange.BSTipSet{},
)
if err != nil {

go.mod
View File

@ -1,6 +1,6 @@
module github.com/filecoin-project/lotus
go 1.19
go 1.20
retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead.
@ -62,6 +62,7 @@ require (
github.com/filecoin-project/test-vectors/schema v0.0.7
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
github.com/georgysavva/scany/v2 v2.0.0
github.com/go-openapi/spec v0.19.11
github.com/golang/mock v1.6.0
github.com/google/uuid v1.3.0
@ -101,10 +102,12 @@ require (
github.com/ipld/go-ipld-selector-text-lite v0.0.1
github.com/ipni/go-libipni v0.0.8
github.com/ipni/index-provider v0.12.0
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa
github.com/jackc/pgx/v5 v5.4.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.31.0
github.com/libp2p/go-libp2p v0.31.1
github.com/libp2p/go-libp2p-kad-dht v0.24.0
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-record v0.2.0
@ -116,20 +119,24 @@ require (
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/mitchellh/go-homedir v1.1.0
github.com/multiformats/go-base32 v0.1.0
github.com/multiformats/go-multiaddr v0.11.0
github.com/multiformats/go-multiaddr v0.12.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.2.0
github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pkg/errors v0.9.1
github.com/polydawn/refmt v0.89.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_golang v1.16.0
github.com/puzpuzpuz/xsync/v2 v2.4.0
github.com/raulk/clock v1.1.0
github.com/raulk/go-watchdog v1.3.0
github.com/samber/lo v1.38.1
github.com/stretchr/testify v1.8.4
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed
github.com/urfave/cli/v2 v2.25.5
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f
@ -235,6 +242,9 @@ require (
github.com/ipfs/go-verifcid v0.0.2 // indirect
github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
@ -276,17 +286,15 @@ require (
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
github.com/quic-go/quic-go v0.38.1 // indirect
github.com/quic-go/quic-go v0.38.2 // indirect
github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/rs/cors v1.7.0 // indirect

go.sum
View File

@ -160,6 +160,7 @@ github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnx
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
@ -384,6 +385,8 @@ github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdk
github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4=
github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU=
github.com/georgysavva/scany/v2 v2.0.0 h1:RGXqxDv4row7/FYoK8MRXAZXqoWF/NM+NP0q50k3DKU=
github.com/georgysavva/scany/v2 v2.0.0/go.mod h1:sigOdh+0qb/+aOs3TVhehVT10p8qJL7K/Zhyz8vWo38=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
@ -441,6 +444,7 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@ -832,6 +836,16 @@ github.com/ipni/index-provider v0.12.0 h1:R3F6dxxKNv4XkE4GJZNLOG0bDEbBQ/S5iztXwS
github.com/ipni/index-provider v0.12.0/go.mod h1:GhyrADJp7n06fqoc1djzkvL4buZYHzV8SoWrlxEo5F4=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw=
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.4.1 h1:oKfB/FhuVtit1bBM3zNRRsZ925ZkMN3HXL+LgLUM9lE=
github.com/jackc/pgx/v5 v5.4.1/go.mod h1:q6iHT8uDNXWiFNOlRqJzBTaSH3+2xCXkokxHZC5qWFY=
github.com/jackc/puddle/v2 v2.2.0 h1:RdcDk92EJBuBS55nQMMYFXTxwstHug4jkhT5pq8VxPk=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
@ -919,6 +933,9 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
@ -943,8 +960,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
github.com/libp2p/go-libp2p v0.31.0 h1:LFShhP8F6xthWiBBq3euxbKjZsoRajVEyBS9snfHxYg=
github.com/libp2p/go-libp2p v0.31.0/go.mod h1:W/FEK1c/t04PbRH3fA9i5oucu5YcgrG0JVoBWT1B7Eg=
github.com/libp2p/go-libp2p v0.31.1 h1:mUiFPwdzC2zMLIATKVddjCuPXVbtC3BsKKVPMs4+jzY=
github.com/libp2p/go-libp2p v0.31.1/go.mod h1:+9TCv+XySSOdaxPF1WIgTK8rXP9jBb8WbemlMCSXGsU=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
@ -1232,8 +1249,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u
github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10=
github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM=
github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE=
github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8=
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
@ -1377,8 +1394,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -1411,8 +1428,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Eznag=
@ -1421,8 +1438,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM=
github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE=
github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4=
github.com/quic-go/quic-go v0.38.2 h1:VWv/6gxIoB8hROQJhx1JEyiegsUQ+zMN3em3kynTGdg=
github.com/quic-go/quic-go v0.38.2/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4=
github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y=
@ -1447,6 +1464,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8=
@ -1552,6 +1571,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU=
github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c=
github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@ -1977,6 +1998,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

itests/harmonydb_test.go Normal file
View File

@ -0,0 +1,174 @@
package itests
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"testing"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/impl"
)
func withSetup(t *testing.T, f func(*kit.TestMiner)) {
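// Spin up a minimal ensemble whose miner keeps its sector index in
// HarmonyDB; this requires a reachable YugabyteDB (provided in CI via
// LOTUS_HARMONYDB_HOSTS).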
_, miner, _ := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.MockProofs(),
kit.WithSectorIndexDB(),
)
f(miner)
}
func TestCrud(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
withSetup(t, func(miner *kit.TestMiner) {
cdb := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
_, err := cdb.Exec(ctx, `
INSERT INTO
itest_scratch (some_int, content)
VALUES
(11, 'cows'),
(5, 'cats')
`)
if err != nil {
t.Fatal("Could not insert: ", err)
}
var ints []struct {
Count int `db:"some_int"`
Animal string `db:"content"`
Unpopulated int
}
err = cdb.Select(ctx, &ints, "SELECT content, some_int FROM itest_scratch")
if err != nil {
t.Fatal("Could not select: ", err)
}
if len(ints) != 2 {
t.Fatal("unexpected count of returns. Want 2, Got ", len(ints))
}
if ints[0].Count != 11 || ints[1].Count != 5 {
t.Fatal("expected [11,5] got ", ints)
}
if ints[0].Animal != "cows" || ints[1].Animal != "cats" {
t.Fatal("expected, [cows, cats] ", ints)
}
fmt.Println("test completed")
})
}
func TestTransaction(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
withSetup(t, func(miner *kit.TestMiner) {
cdb := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
if _, err := cdb.Exec(ctx, "INSERT INTO itest_scratch (some_int) VALUES (4), (5), (6)"); err != nil {
t.Fatal("E0", err)
}
_, err := cdb.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
if _, err := tx.Exec("INSERT INTO itest_scratch (some_int) VALUES (7), (8), (9)"); err != nil {
t.Fatal("E1", err)
}
// sum1 is read from OUTSIDE the transaction so it's the old value
var sum1 int
if err := cdb.QueryRow(ctx, "SELECT SUM(some_int) FROM itest_scratch").Scan(&sum1); err != nil {
t.Fatal("E2", err)
}
if sum1 != 4+5+6 {
t.Fatal("Expected 15, got ", sum1)
}
// sum2 is from INSIDE the transaction, so the updated value.
var sum2 int
if err := tx.QueryRow("SELECT SUM(some_int) FROM itest_scratch").Scan(&sum2); err != nil {
t.Fatal("E3", err)
}
if sum2 != 4+5+6+7+8+9 {
t.Fatal("Expected 39, got ", sum2)
}
return false, nil // rollback
})
if err != nil {
t.Fatal("ET", err)
}
var sum2 int
// Query() example (yes, QueryRow would be preferred here)
q, err := cdb.Query(ctx, "SELECT SUM(some_int) FROM itest_scratch")
if err != nil {
t.Fatal("E4", err)
}
defer q.Close()
var rowCt int
for q.Next() {
err := q.Scan(&sum2)
if err != nil {
t.Fatal("error scanning ", err)
}
rowCt++
}
if sum2 != 4+5+6 {
t.Fatal("Expected 15, got ", sum2)
}
if rowCt != 1 {
t.Fatal("unexpected count of rows")
}
})
}
func TestPartialWalk(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
withSetup(t, func(miner *kit.TestMiner) {
cdb := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
if _, err := cdb.Exec(ctx, `
INSERT INTO
itest_scratch (content, some_int)
VALUES
('andy was here', 5),
('lotus is awesome', 6),
('hello world', 7),
('3rd integration test', 8),
('fiddlesticks', 9)
`); err != nil {
t.Fatal("e1", err)
}
// TASK: FIND THE ID of the string with a specific SHA256
needle := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
q, err := cdb.Query(ctx, `SELECT id, content FROM itest_scratch`)
if err != nil {
t.Fatal("e2", err)
}
defer q.Close()
var tmp struct {
Src string `db:"content"`
ID int
}
var done bool
for q.Next() {
if err := q.StructScan(&tmp); err != nil {
t.Fatal("structscan err " + err.Error())
}
bSha := sha256.Sum256([]byte(tmp.Src))
if hex.EncodeToString(bSha[:]) == needle {
done = true
break
}
}
if !done {
t.Fatal("We didn't find it.")
}
// Answer: tmp.ID
})
}

itests/harmonytask_test.go Normal file
View File

@ -0,0 +1,266 @@
package itests
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/harmony/harmonytask"
"github.com/filecoin-project/lotus/lib/harmony/resources"
"github.com/filecoin-project/lotus/node/impl"
)
type task1 struct {
toAdd []int
myPersonalTableLock sync.Mutex
myPersonalTable map[harmonytask.TaskID]int // This would typically be a DB table
WorkCompleted []string
}
func withDbSetup(t *testing.T, f func(*kit.TestMiner)) {
_, miner, _ := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.MockProofs(),
kit.WithSectorIndexDB(),
)
logging.SetLogLevel("harmonytask", "debug")
f(miner)
}
func (t *task1) Do(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
if !stillOwned() {
return false, errors.New("Why not still owned?")
}
t.myPersonalTableLock.Lock()
defer t.myPersonalTableLock.Unlock()
t.WorkCompleted = append(t.WorkCompleted, fmt.Sprintf("taskResult%d", t.myPersonalTable[tID]))
return true, nil
}
func (t *task1) CanAccept(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
return &list[0], nil
}
func (t *task1) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: 100,
Name: "ThingOne",
MaxFailures: 1,
Cost: resources.Resources{
Cpu: 1,
Ram: 100 << 10, // at 100kb, it's tiny
},
}
}
func (t *task1) Adder(add harmonytask.AddTaskFunc) {
for _, vTmp := range t.toAdd {
v := vTmp
add(func(tID harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) {
t.myPersonalTableLock.Lock()
defer t.myPersonalTableLock.Unlock()
t.myPersonalTable[tID] = v
return true, nil
})
}
}
func init() {
//logging.SetLogLevel("harmonydb", "debug")
//logging.SetLogLevel("harmonytask", "debug")
}
func TestHarmonyTasks(t *testing.T) {
//t.Parallel()
withDbSetup(t, func(m *kit.TestMiner) {
cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
t1 := &task1{
toAdd: []int{56, 73},
myPersonalTable: map[harmonytask.TaskID]int{},
}
harmonytask.POLL_DURATION = time.Millisecond * 100
e, err := harmonytask.New(cdb, []harmonytask.TaskInterface{t1}, "test:1")
require.NoError(t, err)
time.Sleep(time.Second) // do the work. FLAKINESS RISK HERE.
e.GracefullyTerminate(time.Minute)
expected := []string{"taskResult56", "taskResult73"}
sort.Strings(t1.WorkCompleted)
require.Equal(t, expected, t1.WorkCompleted, "unexpected results")
})
}
type passthru struct {
dtl harmonytask.TaskTypeDetails
do func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error)
canAccept func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error)
adder func(add harmonytask.AddTaskFunc)
}
func (t *passthru) Do(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
return t.do(tID, stillOwned)
}
func (t *passthru) CanAccept(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
return t.canAccept(list, e)
}
func (t *passthru) TypeDetails() harmonytask.TaskTypeDetails {
return t.dtl
}
func (t *passthru) Adder(add harmonytask.AddTaskFunc) {
if t.adder != nil {
t.adder(add)
}
}
// Common stuff
var dtl = harmonytask.TaskTypeDetails{Name: "foo", Max: -1, Cost: resources.Resources{}}
var lettersMutex sync.Mutex
func fooLetterAdder(t *testing.T, cdb *harmonydb.DB) *passthru {
return &passthru{
dtl: dtl,
canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
return nil, nil
},
adder: func(add harmonytask.AddTaskFunc) {
for _, vTmp := range []string{"A", "B"} {
v := vTmp
add(func(tID harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) {
_, err := tx.Exec("INSERT INTO itest_scratch (some_int, content) VALUES ($1,$2)", tID, v)
require.NoError(t, err)
return true, nil
})
}
},
}
}
func fooLetterSaver(t *testing.T, cdb *harmonydb.DB, dest *[]string) *passthru {
return &passthru{
dtl: dtl,
canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
return &list[0], nil
},
do: func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
var content string
err = cdb.QueryRow(context.Background(),
"SELECT content FROM itest_scratch WHERE some_int=$1", tID).Scan(&content)
require.NoError(t, err)
lettersMutex.Lock()
defer lettersMutex.Unlock()
*dest = append(*dest, content)
return true, nil
},
}
}
func TestHarmonyTasksWith2PartiesPolling(t *testing.T) {
//t.Parallel()
withDbSetup(t, func(m *kit.TestMiner) {
cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
senderParty := fooLetterAdder(t, cdb)
var dest []string
workerParty := fooLetterSaver(t, cdb, &dest)
harmonytask.POLL_DURATION = time.Millisecond * 100
sender, err := harmonytask.New(cdb, []harmonytask.TaskInterface{senderParty}, "test:1")
require.NoError(t, err)
worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{workerParty}, "test:2")
require.NoError(t, err)
time.Sleep(time.Second) // do the work. FLAKINESS RISK HERE.
sender.GracefullyTerminate(time.Second * 5)
worker.GracefullyTerminate(time.Second * 5)
sort.Strings(dest)
require.Equal(t, []string{"A", "B"}, dest)
})
}
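// TestWorkStealing fakes a dead worker with raw INSERTs: a machine row with a
// stale last_contact owns a task, and the live engine's cleanup pass
// (CLEANUP_FREQUENCY) is expected to reclaim and complete it.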
func TestWorkStealing(t *testing.T) {
//t.Parallel()
withDbSetup(t, func(m *kit.TestMiner) {
cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
ctx := context.Background()
// The dead worker will be played by a few SQL INSERTS.
_, err := cdb.Exec(ctx, `INSERT INTO harmony_machines
(id, last_contact,host_and_port, cpu, ram, gpu)
VALUES (300, DATE '2000-01-01', 'test:1', 4, 400000, 1)`)
require.NoError(t, err)
_, err = cdb.Exec(ctx, `INSERT INTO harmony_task
(id, name, owner_id, posted_time, added_by)
VALUES (1234, 'foo', 300, DATE '2000-01-01', 300)`)
require.NoError(t, err)
_, err = cdb.Exec(ctx, "INSERT INTO itest_scratch (some_int, content) VALUES (1234, 'M')")
require.NoError(t, err)
harmonytask.POLL_DURATION = time.Millisecond * 100
harmonytask.CLEANUP_FREQUENCY = time.Millisecond * 100
var dest []string
worker, err := harmonytask.New(cdb, []harmonytask.TaskInterface{fooLetterSaver(t, cdb, &dest)}, "test:2")
require.NoError(t, err)
time.Sleep(time.Second) // do the work. FLAKINESS RISK HERE.
worker.GracefullyTerminate(time.Second * 5)
require.Equal(t, []string{"M"}, dest)
})
}
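// TestTaskRetry asserts that a task failing its first attempt is retried, and
// that both the failure and the eventual success land in harmony_task_history.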
func TestTaskRetry(t *testing.T) {
//t.Parallel()
withDbSetup(t, func(m *kit.TestMiner) {
cdb := m.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
senderParty := fooLetterAdder(t, cdb)
harmonytask.POLL_DURATION = time.Millisecond * 100
sender, err := harmonytask.New(cdb, []harmonytask.TaskInterface{senderParty}, "test:1")
require.NoError(t, err)
alreadyFailed := map[string]bool{}
var dest []string
fails2xPerMsg := &passthru{
dtl: dtl,
canAccept: func(list []harmonytask.TaskID, e *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
return &list[0], nil
},
do: func(tID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
var content string
err = cdb.QueryRow(context.Background(),
"SELECT content FROM itest_scratch WHERE some_int=$1", tID).Scan(&content)
require.NoError(t, err)
lettersMutex.Lock()
defer lettersMutex.Unlock()
if !alreadyFailed[content] {
alreadyFailed[content] = true
return false, errors.New("intentional 'error'")
}
dest = append(dest, content)
return true, nil
},
}
rcv, err := harmonytask.New(cdb, []harmonytask.TaskInterface{fails2xPerMsg}, "test:2")
require.NoError(t, err)
time.Sleep(time.Second)
sender.GracefullyTerminate(time.Hour)
rcv.GracefullyTerminate(time.Hour)
sort.Strings(dest)
require.Equal(t, []string{"A", "B"}, dest)
type hist struct {
TaskID int
Result bool
Err string
}
var res []hist
require.NoError(t, cdb.Select(context.Background(), &res,
`SELECT task_id, result, err FROM harmony_task_history
ORDER BY result DESC, task_id`))
require.Equal(t, []hist{
{1, true, ""},
{2, true, ""},
{1, false, "error: intentional 'error'"},
{2, false, "error: intentional 'error'"}}, res)
})
}

View File

@ -20,6 +20,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
@ -45,15 +46,20 @@ import (
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/cmd/lotus-provider/rpc"
"github.com/filecoin-project/lotus/cmd/lotus-provider/tasks"
"github.com/filecoin-project/lotus/cmd/lotus-seed/seed"
"github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker"
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/genesis"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/markets/idxprov"
"github.com/filecoin-project/lotus/markets/idxprov/idxprov_test"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
testing2 "github.com/filecoin-project/lotus/node/modules/testing"
@ -118,15 +124,17 @@ type Ensemble struct {
options *ensembleOpts
inactive struct {
fullnodes []*TestFullNode
miners []*TestMiner
workers []*TestWorker
fullnodes []*TestFullNode
providernodes []*TestProviderNode
miners []*TestMiner
workers []*TestWorker
}
active struct {
fullnodes []*TestFullNode
miners []*TestMiner
workers []*TestWorker
bms map[*TestMiner]*BlockMiner
fullnodes []*TestFullNode
providernodes []*TestProviderNode
miners []*TestMiner
workers []*TestWorker
bms map[*TestMiner]*BlockMiner
}
genesis struct {
version network.Version
@ -219,6 +227,20 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
return n
}
// Provider enrolls a new Provider node.
func (n *Ensemble) Provider(lp *TestProviderNode, opts ...NodeOpt) *Ensemble {
options := DefaultNodeOpts
for _, o := range opts {
err := o(&options)
require.NoError(n.t, err)
}
*lp = TestProviderNode{t: n.t, options: options, Deps: &deps.Deps{}}
n.inactive.providernodes = append(n.inactive.providernodes, lp)
return n
}
// Miner enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
@ -359,6 +381,8 @@ func (n *Ensemble) Start() *Ensemble {
n.mn = mocknet.New()
}
sharedITestID := harmonydb.ITestNewID()
// ---------------------
// FULL NODES
// ---------------------
@ -603,6 +627,7 @@ func (n *Ensemble) Start() *Ensemble {
cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining)
cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing)
cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage)
cfg.Subsystems.EnableSectorIndexDB = m.options.subsystems.Has(SHarmony)
cfg.Dealmaking.MaxStagingDealsBytes = m.options.maxStagingDealsBytes
if m.options.mainMiner != nil {
@ -720,6 +745,17 @@ func (n *Ensemble) Start() *Ensemble {
// upgrades
node.Override(new(stmgr.UpgradeSchedule), n.options.upgradeSchedule),
node.Override(new(harmonydb.ITestID), sharedITestID),
node.Override(new(config.HarmonyDB), func() config.HarmonyDB {
return config.HarmonyDB{
Hosts: []string{envElse("LOTUS_HARMONYDB_HOSTS", "127.0.0.1")},
Database: "yugabyte",
Username: "yugabyte",
Password: "yugabyte",
Port: "5433",
}
}),
}
if m.options.subsystems.Has(SMarkets) {
@ -766,6 +802,12 @@ func (n *Ensemble) Start() *Ensemble {
require.NoError(n.t, err)
n.t.Cleanup(func() { _ = stop(context.Background()) })
mCopy := m
n.t.Cleanup(func() {
if mCopy.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB != nil {
mCopy.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB.ITestDeleteAll()
}
})
m.BaseAPI = m.StorageMiner
@ -822,6 +864,8 @@ func (n *Ensemble) Start() *Ensemble {
auth := http.Header(nil)
// FUTURE: Use m.MinerNode.(BaseAPI).(impl.StorageMinerAPI).HarmonyDB to setup.
remote := paths.NewRemote(localStore, m.MinerNode, auth, 20, &paths.DefaultPartialFileHandler{})
store := m.options.workerStorageOpt(remote)
@ -851,12 +895,35 @@ func (n *Ensemble) Start() *Ensemble {
require.NoError(n.t, err)
n.active.workers = append(n.active.workers, m)
}
// If we are here, we have processed all inactive workers and moved them
// to active, so clear the slice.
n.inactive.workers = n.inactive.workers[:0]
for _, p := range n.inactive.providernodes {
// TODO setup config with options
err := p.Deps.PopulateRemainingDeps(context.Background(), &cli.Context{}, false)
require.NoError(n.t, err)
shutdownChan := make(chan struct{})
taskEngine, err := tasks.StartTasks(ctx, p.Deps)
require.NoError(n.t, err)
defer taskEngine.GracefullyTerminate(time.Hour)
err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown.
require.NoError(n.t, err)
finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
//node.ShutdownHandler{Component: "provider", StopFunc: stop},
<-finishCh
n.active.providernodes = append(n.active.providernodes, p)
}
// ---------------------
// MISC
// ---------------------
@ -1063,3 +1130,10 @@ func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.Metad
size := binary.PutUvarint(buf, uint64(maxSectorID))
return mds.Put(ctx, datastore.NewKey(pipeline.StorageCounterDSPrefix), buf[:size])
}
func envElse(env, els string) string {
if v := os.Getenv(env); v != "" {
return v
}
return els
}

View File

@ -101,6 +101,21 @@ func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine
return &full, &one, &two, ens
}
// EnsembleProvider creates and starts an Ensemble with a single full node and a single provider.
// It does not interconnect nodes nor does it begin mining.
func EnsembleProvider(t *testing.T, opts ...interface{}) (*TestFullNode, *TestProviderNode, *Ensemble) {
opts = append(opts, WithAllSubsystems())
eopts, nopts := siftOptions(t, opts)
var (
full TestFullNode
provider TestProviderNode
)
ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Provider(&provider, nopts...).Start()
return &full, &provider, ens
}
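A minimal caller sketch (hypothetical test name; options mirror the other kit helpers):

func TestProviderBoots(t *testing.T) {
	full, provider, _ := kit.EnsembleProvider(t, kit.MockProofs())
	require.NotNil(t, full)
	require.NotNil(t, provider.Deps) // Deps are populated during Start()
}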
func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) {
for _, v := range opts {
switch o := v.(type) {

View File

@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/cmd/lotus-provider/deps"
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/node"
)
@ -54,6 +55,17 @@ type TestFullNode struct {
options nodeOpts
}
// TestProviderNode represents a Provider node enrolled in an Ensemble.
type TestProviderNode struct {
v1api.LotusProviderStruct
t *testing.T
*deps.Deps
options nodeOpts
}
func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode {
var wrappedFullNode TestFullNode
var fns api.FullNodeStruct

View File

@ -37,6 +37,8 @@ const (
SSealing
SSectorStorage
SHarmony
MinerSubsystems = iota
)

View File

@ -89,6 +89,13 @@ func WithAllSubsystems() NodeOpt {
}
}
func WithSectorIndexDB() NodeOpt {
return func(opts *nodeOpts) error {
opts.subsystems = opts.subsystems.Add(SHarmony)
return nil
}
}
func WithSubsystems(systems ...MinerSubsystem) NodeOpt {
return func(opts *nodeOpts) error {
for _, s := range systems {

View File

@ -15,6 +15,7 @@ import (
)
func TestPathTypeFilters(t *testing.T) {
runTest := func(t *testing.T, name string, asserts func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func())) {
t.Run(name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

View File

@ -139,7 +139,7 @@ func TestWindowPostNoBuiltinWindowWithWorker(t *testing.T) {
t.Log("post message landed")
bm.MineBlocks(ctx, 2*time.Millisecond)
bm.MineBlocksMustPost(ctx, 2*time.Millisecond)
waitUntil = di.Open + di.WPoStChallengeWindow*3
t.Logf("End for head.Height > %d", waitUntil)

View File

@ -146,7 +146,7 @@ func TestWindowPostWorker(t *testing.T) {
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
bm := ens.InterconnectAll().BeginMining(2 * time.Millisecond)[0]
bm := ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)[0]
di = di.NextNotElapsed()
@ -172,7 +172,7 @@ func TestWindowPostWorker(t *testing.T) {
t.Log("post message landed")
bm.MineBlocks(ctx, 2*time.Millisecond)
bm.MineBlocksMustPost(ctx, 2*time.Millisecond)
waitUntil = di.Open + di.WPoStChallengeWindow*3
t.Logf("End for head.Height > %d", waitUntil)
@ -235,6 +235,8 @@ func TestWindowPostWorker(t *testing.T) {
type badWorkerStorage struct {
paths.Store
t *testing.T
badsector *uint64
notBadCount int
}
@ -242,10 +244,12 @@ type badWorkerStorage struct {
func (bs *badWorkerStorage) GenerateSingleVanillaProof(ctx context.Context, minerID abi.ActorID, si storiface.PostSectorChallenge, ppt abi.RegisteredPoStProof) ([]byte, error) {
if atomic.LoadUint64(bs.badsector) == uint64(si.SectorNumber) {
bs.notBadCount--
bs.t.Logf("Generating proof for sector %d maybe bad nbc=%d", si.SectorNumber, bs.notBadCount)
if bs.notBadCount < 0 {
return nil, xerrors.New("no proof for you")
}
}
bs.t.Logf("Generating proof for sector %d", si.SectorNumber)
return bs.Store.GenerateSingleVanillaProof(ctx, minerID, si, ppt)
}
@ -268,6 +272,7 @@ func TestWindowPostWorkerSkipBadSector(t *testing.T) {
return &badWorkerStorage{
Store: store,
badsector: &badsector,
t: t,
}
}),
kit.ConstructorOpts(node.ApplyIf(node.IsType(repo.StorageMiner),
@ -275,6 +280,7 @@ func TestWindowPostWorkerSkipBadSector(t *testing.T) {
return &badWorkerStorage{
Store: store,
badsector: &badsector,
t: t,
notBadCount: 1,
}
}))))
@ -506,157 +512,6 @@ func TestWorkerName(t *testing.T) {
require.True(t, found)
}
// Tests V1_1 proofs on post workers with faults
func TestWindowPostV1P1NV20WorkerFault(t *testing.T) {
kit.QuietMiningLogs()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
blocktime := 2 * time.Millisecond
sectors := 2 * 48 * 2
var badsector uint64 = 100000
client, miner, _, ens := kit.EnsembleWorker(t,
kit.PresealSectors(sectors), // 2 sectors per partition, 2 partitions in all 48 deadlines
kit.GenesisNetworkVersion(network.Version20),
kit.ConstructorOpts(
node.Override(new(config.ProvingConfig), func() config.ProvingConfig {
c := config.DefaultStorageMiner()
c.Proving.DisableBuiltinWindowPoSt = true
return c.Proving
}),
node.Override(new(*wdpost.WindowPoStScheduler), modules.WindowPostScheduler(
config.DefaultStorageMiner().Fees,
config.ProvingConfig{
DisableBuiltinWindowPoSt: true,
DisableBuiltinWinningPoSt: false,
DisableWDPoStPreChecks: false,
},
)),
node.Override(new(paths.Store), func(store *paths.Remote) paths.Store {
return &badWorkerStorage{
Store: store,
badsector: &badsector,
notBadCount: 1,
}
})),
kit.ThroughRPC(),
kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}),
kit.WithWorkerStorage(func(store paths.Store) paths.Store {
return &badWorkerStorage{
Store: store,
badsector: &badsector,
}
}))
bm := ens.InterconnectAll().BeginMining(blocktime)[0]
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
// wait for sectors to be committed
require.Eventually(t, func() bool {
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
parts, err := client.StateMinerPartitions(ctx, maddr, di.Index, types.EmptyTSK)
require.NoError(t, err)
return len(parts) > 1
}, 30*time.Second, 100*time.Millisecond)
// Wait until just before a deadline opens
{
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
di = di.NextNotElapsed()
t.Log("Running one proving period")
waitUntil := di.Open + di.WPoStChallengeWindow - di.WPoStChallengeLookback - 1
client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
t.Log("Waiting for post message")
bm.Stop()
}
// Remove one sector in the next deadline (so it's skipped)
{
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
parts, err := client.StateMinerPartitions(ctx, maddr, di.Index+1, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, len(parts), 0)
secs := parts[0].AllSectors
n, err := secs.Count()
require.NoError(t, err)
require.Equal(t, uint64(2), n)
// Drop the sector in first partition
sid, err := secs.First()
require.NoError(t, err)
t.Logf("Drop sector %d; dl %d part %d", sid, di.Index, 0)
atomic.StoreUint64(&badsector, sid)
require.NoError(t, err)
}
bm.MineBlocksMustPost(ctx, 2*time.Millisecond)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from worker address, it will be a PoSt
waitForProof:
for {
//stm: @CHAIN_STATE_GET_ACTOR_001
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
require.NoError(t, err)
nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
require.NoError(t, err)
require.Equal(t, network.Version20, nv)
require.True(t, pmr.Receipt.ExitCode.IsSuccess())
slmsg, err := client.ChainGetMessage(ctx, slm[0])
require.NoError(t, err)
var params miner11.SubmitWindowedPoStParams
require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
require.Len(t, params.Partitions, 2)
sc0, err := params.Partitions[0].Skipped.Count()
require.NoError(t, err)
require.Equal(t, uint64(1), sc0)
sc1, err := params.Partitions[1].Skipped.Count()
require.NoError(t, err)
require.Equal(t, uint64(0), sc1)
}
// Tests V1_1 proofs on a post worker
func TestWindowPostV1P1NV20Worker(t *testing.T) {
kit.QuietMiningLogs()
@ -685,7 +540,7 @@ func TestWindowPostV1P1NV20Worker(t *testing.T) {
kit.ThroughRPC(),
kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt}))
ens.InterconnectAll().BeginMining(blocktime)
ens.InterconnectAll().BeginMiningMustPost(blocktime)
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)

View File

@ -7,6 +7,7 @@ import (
"path/filepath"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
@ -37,7 +38,16 @@ type fsJournal struct {
// OpenFSJournal constructs a rolling filesystem journal, with a default
// per-file size limit of 1GiB.
func OpenFSJournal(lr repo.LockedRepo, disabled journal.DisabledEvents) (journal.Journal, error) {
dir := filepath.Join(lr.Path(), "journal")
return OpenFSJournalPath(lr.Path(), disabled)
}
func OpenFSJournalPath(path string, disabled journal.DisabledEvents) (journal.Journal, error) {
path, err := homedir.Expand(path)
if err != nil {
return nil, xerrors.Errorf("failed to expand repo path: %w", err)
}
dir := filepath.Join(path, "journal")
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to mk directory %s for file journal: %w", dir, err)
}
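A hedged usage sketch of the new path-based entry point (the path and the fsjournal import alias are illustrative):

func openProviderJournal() (journal.Journal, error) {
	// "~" is expanded inside OpenFSJournalPath via homedir.Expand.
	return fsjournal.OpenFSJournalPath("~/.lotus-provider", journal.DisabledEvents{})
}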

View File

@ -26,6 +26,9 @@ func ParseDisabledEvents(s string) (DisabledEvents, error) {
s = strings.TrimSpace(s) // sanitize
evts := strings.Split(s, ",")
ret := make(DisabledEvents, 0, len(evts))
if len(s) == 0 {
return ret, nil
}
for _, evt := range evts {
evt = strings.TrimSpace(evt) // sanitize
s := strings.Split(evt, ":")

View File

@ -0,0 +1,35 @@
/*
# HarmonyDB provides database abstractions over SP-wide Postgres-compatible instance(s).
# Features
Rolling to secondary database servers on connection failure
Convenience features for Go + SQL
Prevention of SQL injection vulnerabilities
Monitoring via Prometheus stats and logging of errors
# Usage
Processes should use New() to instantiate a *DB and keep it.
Consumers can use this *DB concurrently.
Creating and changing tables & views should happen in ./sql/ folder.
Name the file "today's date" in the format: YYYYMMDD.sql (ex: 20231231.sql for the year's last day)
a. CREATE TABLE should NOT have a schema:
GOOD: CREATE TABLE foo ();
BAD: CREATE TABLE me.foo ();
b. Schema is managed for you. It provides isolation for integration tests & multi-use.
c. Git Merges: All run once, so old-after-new is OK when there are no deps.
d. NEVER change shipped sql files. Have later files make corrections.
e. Anything not yet run will be run, so an older date making it to master is OK.
Write SQL with context, raw strings, and args:
name := "Alice"
var ID int
err := QueryRow(ctx, "SELECT id FROM people WHERE first_name=$1", name).Scan(&ID)
fmt.Println(ID)
Note: Scan() is column-oriented, while Select() & StructScan() are field-name/tag oriented.
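A hedged sketch of the tag-oriented form (table and columns are illustrative):

	var rows []struct {
		ID      int    `db:"id"`
		Content string `db:"content"`
	}
	err = db.Select(ctx, &rows, "SELECT id, content FROM itest_scratch WHERE some_int = $1", 42)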
*/
package harmonydb

View File

@ -0,0 +1,301 @@
package harmonydb
import (
"context"
"embed"
"fmt"
"math/rand"
"net"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/node/config"
)
type ITestID string
// ITestNewID see ITestWithID doc
func ITestNewID() ITestID {
return ITestID(strconv.Itoa(rand.Intn(99999)))
}
type DB struct {
pgx *pgxpool.Pool
cfg *pgxpool.Config
schema string
hostnames []string
BTFPOnce sync.Once
BTFP atomic.Uintptr
}
var logger = logging.Logger("harmonydb")
// NewFromConfig is a convenience function.
// In usage:
//
// db, err := NewFromConfig(config.HarmonyDB) // in binary init
func NewFromConfig(cfg config.HarmonyDB) (*DB, error) {
return New(
cfg.Hosts,
cfg.Username,
cfg.Password,
cfg.Database,
cfg.Port,
"",
)
}
func NewFromConfigWithITestID(cfg config.HarmonyDB) func(id ITestID) (*DB, error) {
return func(id ITestID) (*DB, error) {
return New(
cfg.Hosts,
cfg.Username,
cfg.Password,
cfg.Database,
cfg.Port,
id,
)
}
}
// New is to be called once per binary to establish the pool.
// It returns a connection to an already-upgraded (migrated) database.
// This entry point serves both production and integration tests, so it is dependency-injection friendly.
func New(hosts []string, username, password, database, port string, itestID ITestID) (*DB, error) {
itest := string(itestID)
connString := ""
if len(hosts) > 0 {
connString = "host=" + hosts[0] + " "
}
for k, v := range map[string]string{"user": username, "password": password, "dbname": database, "port": port} {
if strings.TrimSpace(v) != "" {
connString += k + "=" + v + " "
}
}
schema := "lotus"
if itest != "" {
schema = "itest_" + itest
}
if err := ensureSchemaExists(connString, schema); err != nil {
return nil, err
}
cfg, err := pgxpool.ParseConfig(connString + "search_path=" + schema)
if err != nil {
return nil, err
}
// enable multiple fallback hosts.
for _, h := range hosts[1:] {
cfg.ConnConfig.Fallbacks = append(cfg.ConnConfig.Fallbacks, &pgconn.FallbackConfig{Host: h})
}
cfg.ConnConfig.OnNotice = func(conn *pgconn.PgConn, n *pgconn.Notice) {
logger.Debug("database notice: " + n.Message + ": " + n.Detail)
DBMeasures.Errors.M(1)
}
db := DB{cfg: cfg, schema: schema, hostnames: hosts} // pgx populated in AddStatsAndConnect
if err := db.addStatsAndConnect(); err != nil {
return nil, err
}
return &db, db.upgrade()
}
type tracer struct {
}
type ctxkey string
const SQL_START = ctxkey("sqlStart")
const SQL_STRING = ctxkey("sqlString")
func (t tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
return context.WithValue(context.WithValue(ctx, SQL_START, time.Now()), SQL_STRING, data.SQL)
}
func (t tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
DBMeasures.Hits.M(1)
ms := time.Since(ctx.Value(SQL_START).(time.Time)).Milliseconds()
DBMeasures.TotalWait.M(ms)
DBMeasures.Waits.Observe(float64(ms))
if data.Err != nil {
DBMeasures.Errors.M(1)
}
logger.Debugw("SQL run",
"query", ctx.Value(SQL_STRING).(string),
"err", data.Err,
"rowCt", data.CommandTag.RowsAffected(),
"milliseconds", ms)
}
func (db *DB) GetRoutableIP() (string, error) {
tx, err := db.pgx.Begin(context.Background())
if err != nil {
return "", err
}
defer func() { _ = tx.Rollback(context.Background()) }()
local := tx.Conn().PgConn().Conn().LocalAddr()
addr, ok := local.(*net.TCPAddr)
if !ok {
return "", fmt.Errorf("could not get local addr from %v", addr)
}
return addr.IP.String(), nil
}
// addStatsAndConnect connects a prometheus logger. Be sure to run this before using the DB.
func (db *DB) addStatsAndConnect() error {
db.cfg.ConnConfig.Tracer = tracer{}
hostnameToIndex := map[string]float64{}
for i, h := range db.hostnames {
hostnameToIndex[h] = float64(i)
}
db.cfg.AfterConnect = func(ctx context.Context, c *pgx.Conn) error {
s := db.pgx.Stat()
DBMeasures.OpenConnections.M(int64(s.TotalConns()))
DBMeasures.WhichHost.Observe(hostnameToIndex[c.Config().Host])
//FUTURE place for any connection seasoning
return nil
}
// Timeout the first connection so we know if the DB is down.
ctx, ctxClose := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer ctxClose()
var err error
db.pgx, err = pgxpool.NewWithConfig(ctx, db.cfg)
if err != nil {
logger.Errorf("Unable to connect to database: %v", err)
return err
}
return nil
}
// ITestDeleteAll will delete everything created for "this" integration test.
// This must be called at the end of each integration test.
func (db *DB) ITestDeleteAll() {
if !strings.HasPrefix(db.schema, "itest_") {
fmt.Println("Warning: this should never be called on anything but an itest schema.")
return
}
defer db.pgx.Close()
_, err := db.pgx.Exec(context.Background(), "DROP SCHEMA "+db.schema+" CASCADE")
if err != nil {
fmt.Println("warning: unclean itest shutdown: cannot delete schema: " + err.Error())
return
}
}
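A hedged sketch of the intended call pattern in a test (cfg pointing at the itest database is an assumption):

db, err := harmonydb.NewFromConfigWithITestID(cfg)(harmonydb.ITestNewID())
require.NoError(t, err)
t.Cleanup(db.ITestDeleteAll) // drops the itest_* schema when the test ends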
var schemaREString = "^[A-Za-z0-9_]+$"
var schemaRE = regexp.MustCompile(schemaREString)
func ensureSchemaExists(connString, schema string) error {
// FUTURE allow using fallback DBs for start-up.
ctx, cncl := context.WithDeadline(context.Background(), time.Now().Add(3*time.Second))
p, err := pgx.Connect(ctx, connString)
defer cncl()
if err != nil {
return xerrors.Errorf("unable to connect to db: %s, err: %v", connString, err)
}
defer func() { _ = p.Close(context.Background()) }()
if len(schema) < 5 || !schemaRE.MatchString(schema) {
return xerrors.New("schema must be of the form " + schemaREString + "\n Got: " + schema)
}
_, err = p.Exec(context.Background(), "CREATE SCHEMA IF NOT EXISTS "+schema)
if err != nil {
return xerrors.Errorf("cannot create schema: %w", err)
}
return nil
}
//go:embed sql
var fs embed.FS
func (db *DB) upgrade() error {
// Does the version table exist? if not, make it.
// NOTE: This cannot change except via the next sql file.
_, err := db.Exec(context.Background(), `CREATE TABLE IF NOT EXISTS base (
id SERIAL PRIMARY KEY,
entry CHAR(12),
applied TIMESTAMP DEFAULT current_timestamp
)`)
if err != nil {
logger.Error("Upgrade failed.")
return xerrors.Errorf("Cannot create base table %w", err)
}
// __Run scripts in order.__
landed := map[string]bool{}
{
var landedEntries []struct{ Entry string }
err = db.Select(context.Background(), &landedEntries, "SELECT entry FROM base")
if err != nil {
logger.Error("Cannot read entries: " + err.Error())
return xerrors.Errorf("cannot read entries: %w", err)
}
for _, l := range landedEntries {
landed[l.Entry[:8]] = true
}
}
dir, err := fs.ReadDir("sql")
if err != nil {
logger.Error("Cannot read fs entries: " + err.Error())
return err
}
sort.Slice(dir, func(i, j int) bool { return dir[i].Name() < dir[j].Name() })
if len(dir) == 0 {
logger.Error("No sql files found.")
}
for _, e := range dir {
name := e.Name()
if !strings.HasSuffix(name, ".sql") {
logger.Debug("Must have only SQL files here, found: " + name)
continue
}
if landed[name[:8]] {
logger.Debug("DB Schema " + name + " already applied.")
continue
}
file, err := fs.ReadFile("sql/" + name)
if err != nil {
logger.Error("weird embed file read err")
return err
}
for _, s := range strings.Split(string(file), ";") { // Implement the changes.
if len(strings.TrimSpace(s)) == 0 {
continue
}
_, err = db.pgx.Exec(context.Background(), s)
if err != nil {
msg := fmt.Sprintf("Could not upgrade! File %s, Query: %s, Returned: %s", name, s, err.Error())
logger.Error(msg)
return xerrors.New(msg) // makes devs lives easier by placing message at the end.
}
}
// Mark Completed.
_, err = db.Exec(context.Background(), "INSERT INTO base (entry) VALUES ($1)", name[:8])
if err != nil {
logger.Error("Cannot update base: " + err.Error())
return xerrors.Errorf("cannot insert into base: %w", err)
}
}
return nil
}

View File

@ -0,0 +1,77 @@
package harmonydb
import (
"github.com/prometheus/client_golang/prometheus"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"github.com/filecoin-project/lotus/metrics"
)
var (
dbTag, _ = tag.NewKey("db_name")
pre = "harmonydb_base_"
waitsBuckets = []float64{0, 10, 20, 30, 50, 80, 130, 210, 340, 550, 890}
whichHostBuckets = []float64{0, 1, 2, 3, 4, 5}
)
// DBMeasures groups all db metrics.
var DBMeasures = struct {
Hits *stats.Int64Measure
TotalWait *stats.Int64Measure
Waits prometheus.Histogram
OpenConnections *stats.Int64Measure
Errors *stats.Int64Measure
WhichHost prometheus.Histogram
}{
Hits: stats.Int64(pre+"hits", "Total number of uses.", stats.UnitDimensionless),
TotalWait: stats.Int64(pre+"total_wait", "Total delay. A numerator over hits to get average wait.", stats.UnitMilliseconds),
Waits: prometheus.NewHistogram(prometheus.HistogramOpts{
Name: pre + "waits",
Buckets: waitsBuckets,
Help: "The histogram of waits for query completions.",
}),
OpenConnections: stats.Int64(pre+"open_connections", "Total connection count.", stats.UnitDimensionless),
Errors: stats.Int64(pre+"errors", "Total error count.", stats.UnitDimensionless),
WhichHost: prometheus.NewHistogram(prometheus.HistogramOpts{
Name: pre + "which_host",
Buckets: whichHostBuckets,
Help: "The index of the hostname being used",
}),
}
// CacheViews groups all cache-related default views.
func init() {
metrics.RegisterViews(
&view.View{
Measure: DBMeasures.Hits,
Aggregation: view.Sum(),
TagKeys: []tag.Key{dbTag},
},
&view.View{
Measure: DBMeasures.TotalWait,
Aggregation: view.Sum(),
TagKeys: []tag.Key{dbTag},
},
&view.View{
Measure: DBMeasures.OpenConnections,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{dbTag},
},
&view.View{
Measure: DBMeasures.Errors,
Aggregation: view.Sum(),
TagKeys: []tag.Key{dbTag},
},
)
err := prometheus.Register(DBMeasures.Waits)
if err != nil {
panic(err)
}
err = prometheus.Register(DBMeasures.WhichHost)
if err != nil {
panic(err)
}
}
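A minimal recording sketch (ctx and elapsedMs are assumptions; the tracer in db.go feeds these from query callbacks):

stats.Record(ctx, DBMeasures.Hits.M(1))      // count one query
DBMeasures.Waits.Observe(float64(elapsedMs)) // per-query latency histogram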

View File

@ -0,0 +1,7 @@
CREATE TABLE itest_scratch (
id SERIAL PRIMARY KEY,
content TEXT,
some_int INTEGER,
second_int INTEGER,
update_time TIMESTAMP DEFAULT current_timestamp
)

View File

@ -0,0 +1,45 @@
create table sector_location
(
miner_id bigint not null,
sector_num bigint not null,
sector_filetype int not null,
storage_id varchar not null,
is_primary bool,
read_ts timestamp(6),
read_refs int,
write_ts timestamp(6),
write_lock_owner varchar,
constraint sectorlocation_pk
primary key (miner_id, sector_num, sector_filetype, storage_id)
);
alter table sector_location
alter column read_refs set not null;
alter table sector_location
alter column read_refs set default 0;
create table storage_path
(
"storage_id" varchar not null
constraint "storage_path_pkey"
primary key,
"urls" varchar, -- comma separated list of urls
"weight" bigint,
"max_storage" bigint,
"can_seal" bool,
"can_store" bool,
"groups" varchar, -- comma separated list of group names
"allow_to" varchar, -- comma separated list of allowed groups
"allow_types" varchar, -- comma separated list of allowed file types
"deny_types" varchar, -- comma separated list of denied file types
"capacity" bigint,
"available" bigint,
"fs_available" bigint,
"reserved" bigint,
"used" bigint,
"last_heartbeat" timestamp(6),
"heartbeat_err" varchar
);

Some files were not shown because too many files have changed in this diff