Drop curio-related sources

Łukasz Magiera 2024-05-27 19:01:56 +02:00 committed by Łukasz Magiera
parent dce7f5d32d
commit bcdca4d09f
158 changed files with 6 additions and 29927 deletions

View File

@@ -1,95 +0,0 @@
#####################################
FROM golang:1.21.7-bullseye AS curio-builder
MAINTAINER Curio Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
ENV XDG_CACHE_HOME="/tmp"
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
RUST_VERSION=1.63.0
RUN set -eux; \
dpkgArch="$(dpkg --print-architecture)"; \
case "${dpkgArch##*-}" in \
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
esac; \
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
wget "$url"; \
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
COPY ./ /opt/curio
WORKDIR /opt/curio
### make configurable filecoin-ffi build
ARG FFI_BUILD_FROM_SOURCE=0
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
RUN make clean deps
ARG RUSTFLAGS=""
ARG GOFLAGS=""
RUN make curio-devnet
#####################################
FROM ubuntu:22.04 AS curio-all-in-one
RUN apt-get update && apt-get install -y dnsutils vim curl
# Copy libraries and binaries from curio-builder
COPY --from=curio-builder /etc/ssl/certs /etc/ssl/certs
COPY --from=curio-builder /lib/*/libdl.so.2 /lib/
COPY --from=curio-builder /lib/*/librt.so.1 /lib/
COPY --from=curio-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=curio-builder /lib/*/libutil.so.1 /lib/
COPY --from=curio-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=curio-builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=curio-builder /usr/lib/*/libhwloc.so.* /lib/
COPY --from=curio-builder /usr/lib/*/libOpenCL.so.1 /lib/
# Setup user and OpenCL configuration
RUN useradd -r -u 532 -U fc && \
mkdir -p /etc/OpenCL/vendors && \
echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
# Environment setup
ENV FILECOIN_PARAMETER_CACHE=/var/tmp/filecoin-proof-parameters \
LOTUS_MINER_PATH=/var/lib/lotus-miner \
LOTUS_PATH=/var/lib/lotus \
CURIO_REPO_PATH=/var/lib/curio
# Copy binaries and scripts
COPY --from=curio-builder /opt/curio/lotus /usr/local/bin/
COPY --from=curio-builder /opt/curio/lotus-seed /usr/local/bin/
COPY --from=curio-builder /opt/curio/lotus-shed /usr/local/bin/
COPY --from=curio-builder /opt/curio/lotus-miner /usr/local/bin/
COPY --from=curio-builder /opt/curio/curio /usr/local/bin/
COPY --from=curio-builder /opt/curio/sptool /usr/local/bin/
# Set up directories and permissions
RUN mkdir /var/tmp/filecoin-proof-parameters \
/var/lib/lotus \
/var/lib/lotus-miner \
/var/lib/curio && \
chown fc: /var/tmp/filecoin-proof-parameters /var/lib/lotus /var/lib/lotus-miner /var/lib/curio
# Define volumes
VOLUME ["/var/tmp/filecoin-proof-parameters", "/var/lib/lotus", "/var/lib/lotus-miner", "/var/lib/curio"]
# Expose necessary ports
EXPOSE 1234 2345 12300 4701 32100
CMD ["/bin/bash"]

View File

@@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps
build-devnets: build lotus-seed lotus-shed curio sptool
build-devnets: build lotus-seed lotus-shed
.PHONY: build-devnets
debug: GOFLAGS+=-tags=debug
@@ -97,24 +97,6 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner
BINS+=lotus-miner
curio: $(BUILD_DEPS)
rm -f curio
$(GOCC) build $(GOFLAGS) -o curio -ldflags " \
-X github.com/filecoin-project/lotus/curiosrc/build.IsOpencl=$(FFI_USE_OPENCL) \
-X github.com/filecoin-project/lotus/curiosrc/build.Commit=`git log -1 --format=%h_%cI`" \
./curiosrc/cmd/curio
.PHONY: curio
BINS+=curio
cu2k: GOFLAGS+=-tags=2k
cu2k: curio
sptool: $(BUILD_DEPS)
rm -f sptool
$(GOCC) build $(GOFLAGS) -o sptool ./curiosrc/cmd/sptool
.PHONY: sptool
BINS+=sptool
lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
@@ -133,13 +115,13 @@ lotus-gateway: $(BUILD_DEPS)
.PHONY: lotus-gateway
BINS+=lotus-gateway
build: lotus lotus-miner lotus-worker curio sptool
build: lotus lotus-miner lotus-worker
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
.PHONY: build
install: install-daemon install-miner install-worker install-curio install-sptool
install: install-daemon install-miner install-worker
install-daemon:
install -C ./lotus /usr/local/bin/lotus
@@ -147,12 +129,6 @@ install-daemon:
install-miner:
install -C ./lotus-miner /usr/local/bin/lotus-miner
install-curio:
install -C ./curio /usr/local/bin/curio
install-sptool:
install -C ./sptool /usr/local/bin/sptool
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
@@ -168,12 +144,6 @@ uninstall-daemon:
uninstall-miner:
rm -f /usr/local/bin/lotus-miner
uninstall-curio:
rm -f /usr/local/bin/curio
uninstall-sptool:
rm -f /usr/local/bin/sptool
uninstall-worker:
rm -f /usr/local/bin/lotus-worker
@@ -275,14 +245,6 @@ install-miner-service: install-miner install-daemon-service
@echo "To start the service, run: 'sudo systemctl start lotus-miner'"
@echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'"
install-curio-service: install-curio install-sptool install-daemon-service
mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service
systemctl daemon-reload
@echo
@echo "Curio service installed. Don't forget to run 'sudo systemctl start curio' to start it and 'sudo systemctl enable curio' for it to be enabled on startup."
install-main-services: install-miner-service
install-all-services: install-main-services
@@ -301,12 +263,6 @@ clean-miner-service:
rm -f /etc/systemd/system/lotus-miner.service
systemctl daemon-reload
clean-curio-service:
-systemctl stop curio
-systemctl disable curio
rm -f /etc/systemd/system/curio.service
systemctl daemon-reload
clean-main-services: clean-daemon-service
clean-all-services: clean-main-services
@@ -381,7 +337,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-curio
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@@ -390,8 +346,6 @@ docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-md-curio: docsgen-md-bin
./docgen-md "api/api_curio.go" "Curio" "api" "./api" > documentation/en/api-v0-methods-curio.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
@@ -416,47 +370,17 @@ gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen
jen: gen
snap: lotus lotus-miner lotus-worker curio sptool
snap: lotus lotus-miner lotus-worker
snapcraft
# snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker curio sptool
docsgen-cli: lotus lotus-miner lotus-worker
python3 ./scripts/generate-lotus-cli.py
./lotus config default > documentation/en/default-lotus-config.toml
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
./curio config default > documentation/en/default-curio-config.toml
.PHONY: docsgen-cli
print-%:
@echo $*=$($*)
### Curio devnet images
curio_docker_user?=curio
curio_base_image=$(curio_docker_user)/curio-all-in-one:latest-debug
ffi_from_source?=0
curio-devnet: lotus lotus-miner lotus-shed lotus-seed curio sptool
.PHONY: curio-devnet
curio_docker_build_cmd=docker build --build-arg CURIO_TEST_IMAGE=$(curio_base_image) \
--build-arg FFI_BUILD_FROM_SOURCE=$(ffi_from_source) $(docker_args)
docker/curio-all-in-one:
$(curio_docker_build_cmd) -f Dockerfile.curio --target curio-all-in-one \
-t $(curio_base_image) --build-arg GOFLAGS=-tags=debug .
.PHONY: docker/curio-all-in-one
docker/%:
cd curiosrc/docker/$* && DOCKER_BUILDKIT=1 $(curio_docker_build_cmd) -t $(curio_docker_user)/$*-dev:dev \
--build-arg BUILD_VERSION=dev .
docker/curio-devnet: $(lotus_build_cmd) \
docker/curio-all-in-one docker/lotus docker/lotus-miner docker/curio docker/yugabyte
.PHONY: docker/curio-devnet
curio-devnet/up:
rm -rf ./curiosrc/docker/data && docker compose -f ./curiosrc/docker/docker-compose.yaml up -d
curio-devnet/down:
docker compose -f ./curiosrc/docker/docker-compose.yaml down --rmi=local && sleep 2 && rm -rf ./curiosrc/docker/data

View File

@@ -1,35 +0,0 @@
package api
import (
"context"
"net/http"
"net/url"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type Curio interface {
Version(context.Context) (Version, error) //perm:admin
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write
StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error //perm:admin
StorageAddLocal(ctx context.Context, path string) error //perm:admin
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
LogList(ctx context.Context) ([]string, error) //perm:read
LogSetLevel(ctx context.Context, subsystem, level string) error //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}
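
For context, here is a minimal, hypothetical sketch (not part of the removed sources) of how this Curio RPC interface was consumed from Go via the NewCurioRpc constructor removed in the next file. The endpoint address is a placeholder; port 12300 is only an assumption based on the EXPOSE line in the devnet Dockerfile above, and a real call would also set an auth token on the request header.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint and empty header, for illustration only.
	curio, closer, err := client.NewCurioRpc(ctx, "ws://127.0.0.1:12300/rpc/v0", http.Header{})
	if err != nil {
		panic(err)
	}
	defer closer()

	// Version is the simplest call on the interface above.
	v, err := curio.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("curio API version:", v)
}
```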

View File

@@ -15,16 +15,6 @@ import (
"github.com/filecoin-project/lotus/lib/rpcenc"
)
// NewCurioRpc creates a new http jsonrpc client.
func NewCurioRpc(ctx context.Context, addr string, requestHeader http.Header) (api.Curio, jsonrpc.ClientCloser, error) {
var res v1api.CurioStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
return &res, closer, err
}
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct

View File

@@ -456,10 +456,6 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
i = &api.GatewayStruct{}
t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
case "Curio":
i = &api.CurioStruct{}
t = reflect.TypeOf(new(struct{ api.Curio })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.CurioStruct{}.Internal))
default:
panic("unknown type")
}

View File

@@ -12,5 +12,3 @@ type RawFullNodeAPI FullNode
func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a)
}
type CurioStruct = api.CurioStruct

View File

@@ -59,8 +59,6 @@ var (
MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 7, 0)
CurioAPIVersion0 = newVer(1, 0, 0)
)
//nolint:varcheck,deadcode

View File

@@ -1,64 +0,0 @@
package curio
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/curiosrc/multictladdr"
"github.com/filecoin-project/lotus/node/config"
)
func AddressSelector(addrConf []config.CurioAddresses) func() (*multictladdr.MultiAddressSelector, error) {
return func() (*multictladdr.MultiAddressSelector, error) {
as := &multictladdr.MultiAddressSelector{
MinerMap: make(map[address.Address]api.AddressConfig),
}
if addrConf == nil {
return as, nil
}
for _, addrConf := range addrConf {
for _, minerID := range addrConf.MinerAddresses {
tmp := api.AddressConfig{
DisableOwnerFallback: addrConf.DisableOwnerFallback,
DisableWorkerFallback: addrConf.DisableWorkerFallback,
}
for _, s := range addrConf.PreCommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing precommit control address: %w", err)
}
tmp.PreCommitControl = append(tmp.PreCommitControl, addr)
}
for _, s := range addrConf.CommitControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing commit control address: %w", err)
}
tmp.CommitControl = append(tmp.CommitControl, addr)
}
for _, s := range addrConf.TerminateControl {
addr, err := address.NewFromString(s)
if err != nil {
return nil, xerrors.Errorf("parsing terminate control address: %w", err)
}
tmp.TerminateControl = append(tmp.TerminateControl, addr)
}
a, err := address.NewFromString(minerID)
if err != nil {
return nil, xerrors.Errorf("parsing miner address %s: %w", minerID, err)
}
as.MinerMap[a] = tmp
}
}
return as, nil
}
}
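
A small, hypothetical usage sketch (not from the removed code): feed one CurioAddresses layer into AddressSelector and inspect the resulting MinerMap. The miner and control addresses are placeholders, and the curiosrc import path is assumed from the package name above.

```go
package main

import (
	"fmt"

	curio "github.com/filecoin-project/lotus/curiosrc" // assumed import path for package curio
	"github.com/filecoin-project/lotus/node/config"
)

func main() {
	// One hypothetical address layer; all addresses below are placeholders.
	addrConf := []config.CurioAddresses{{
		MinerAddresses:   []string{"t01000"},
		PreCommitControl: []string{"t0100"},
		CommitControl:    []string{"t0101"},
	}}

	// AddressSelector returns a constructor; call it to build the selector.
	sel, err := curio.AddressSelector(addrConf)()
	if err != nil {
		panic(err)
	}
	for miner, ac := range sel.MinerMap {
		fmt.Println(miner, "precommit:", ac.PreCommitControl, "commit:", ac.CommitControl)
	}
}
```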

View File

@@ -1,573 +0,0 @@
package alertmanager
import (
"bytes"
"database/sql"
"fmt"
"math"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/dustin/go-humanize"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/node/config"
)
// balanceCheck retrieves the machine details from the database and performs balance checks on unique addresses.
// It populates the alert map with any errors encountered during the process and with any alerts related to low wallet balance and missing wallets.
// The alert map key is "Balance Check".
// It queries the database for the configuration of each layer and decodes it using the toml.Decode function.
// It then iterates over the addresses in the configuration and curates a list of unique addresses.
// If an address is not found in the chain node, it adds an alert to the alert map.
// If the balance of an address is below MinimumWalletBalance, it adds an alert to the alert map.
// If there are any errors encountered during the process, the err field of the alert map is populated.
func balanceCheck(al *alerts) {
Name := "Balance Check"
al.alertMap[Name] = &alertOut{}
var ret string
uniqueAddrs, _, err := al.getAddresses()
if err != nil {
al.alertMap[Name].err = err
return
}
for _, addrStr := range uniqueAddrs {
addr, err := address.NewFromString(addrStr)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("failed to parse address: %w", err)
return
}
has, err := al.api.WalletHas(al.ctx, addr)
if err != nil {
al.alertMap[Name].err = err
return
}
if !has {
ret += fmt.Sprintf("Wallet %s was not found in chain node. ", addrStr)
}
balance, err := al.api.WalletBalance(al.ctx, addr)
if err != nil {
al.alertMap[Name].err = err
return
}
if abi.TokenAmount(al.cfg.MinimumWalletBalance).GreaterThanEqual(balance) {
ret += fmt.Sprintf("Balance for wallet %s is below 5 Fil. ", addrStr)
}
}
if ret != "" {
al.alertMap[Name].alertString = ret
}
return
}
// taskFailureCheck retrieves the task failure counts from the database for a specific time period.
// It then checks for specific sealing tasks and tasks with more than 5 failures to generate alerts.
func taskFailureCheck(al *alerts) {
Name := "TaskFailures"
al.alertMap[Name] = &alertOut{}
type taskFailure struct {
Machine string `db:"completed_by_host_and_port"`
Name string `db:"name"`
Failures int `db:"failed_count"`
}
var taskFailures []taskFailure
err := al.db.Select(al.ctx, &taskFailures, `
SELECT completed_by_host_and_port, name, COUNT(*) AS failed_count
FROM harmony_task_history
WHERE result = FALSE
AND work_end >= NOW() - $1::interval
GROUP BY completed_by_host_and_port, name
ORDER BY completed_by_host_and_port, name;`, fmt.Sprintf("%f Minutes", AlertMangerInterval.Minutes()))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting failed task count: %w", err)
return
}
mmap := make(map[string]int)
tmap := make(map[string]int)
if len(taskFailures) > 0 {
for _, tf := range taskFailures {
_, ok := tmap[tf.Name]
if !ok {
tmap[tf.Name] = tf.Failures
} else {
tmap[tf.Name] += tf.Failures
}
_, ok = mmap[tf.Machine]
if !ok {
mmap[tf.Machine] = tf.Failures
} else {
mmap[tf.Machine] += tf.Failures
}
}
}
sealingTasks := []string{"SDR", "TreeD", "TreeRC", "PreCommitSubmit", "PoRep", "Finalize", "MoveStorage", "CommitSubmit", "WdPost", "ParkPiece"}
contains := func(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// Alerts for any sealing pipeline failures. Other tasks should have at least 5 failures for an alert
for name, count := range tmap {
if contains(sealingTasks, name) {
al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count)
}
if count > 5 {
al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count)
}
}
// Alert if a machine failed more than 5 tasks
for name, count := range mmap {
if count > 5 {
al.alertMap[Name].alertString += fmt.Sprintf("Machine: %s, Failures: %d. ", name, count)
}
}
return
}
// permanentStorageCheck retrieves the storage details from the database and checks if there is sufficient space for sealing sectors.
// It queries the database for the available storage for all storage paths that can store data.
// It queries the database for sectors being sealed that have not been finalized yet.
// For each sector, it calculates the required space for sealing based on the sector size.
// It checks if there is enough available storage for each sector and updates the sectorMap accordingly.
// If any sectors are unaccounted for, it calculates the total missing space and adds an alert to the alert map.
func permanentStorageCheck(al *alerts) {
Name := "PermanentStorageSpace"
al.alertMap[Name] = &alertOut{}
// Get all storage path for permanent storages
type storage struct {
ID string `db:"storage_id"`
Available int64 `db:"available"`
}
var storages []storage
err := al.db.Select(al.ctx, &storages, `
SELECT storage_id, available
FROM storage_path
WHERE can_store = TRUE;`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting storage details: %w", err)
return
}
type sector struct {
Miner abi.ActorID `db:"sp_id"`
Number abi.SectorNumber `db:"sector_number"`
Proof abi.RegisteredSealProof `db:"reg_seal_proof"`
}
var sectors []sector
err = al.db.Select(al.ctx, &sectors, `
SELECT sp_id, sector_number, reg_seal_proof
FROM sectors_sdr_pipeline
WHERE after_move_storage = FALSE;`)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting sectors being sealed: %w", err)
return
}
type sm struct {
s sector
size int64
}
sectorMap := make(map[sm]bool)
for _, sec := range sectors {
space := int64(0)
sec := sec
sectorSize, err := sec.Proof.SectorSize()
if err != nil {
space = int64(64<<30)*2 + int64(200<<20) // Assume 64 GiB sector
} else {
space = int64(sectorSize)*2 + int64(200<<20) // sealed + unsealed + cache
}
key := sm{s: sec, size: space}
sectorMap[key] = false
for i := range storages {
if storages[i].Available >= space {
storages[i].Available -= space
sectorMap[key] = true
break
}
}
}
missingSpace := big.NewInt(0)
for sec, accounted := range sectorMap {
if !accounted {
missingSpace = big.Add(missingSpace, big.NewInt(sec.size))
}
}
if missingSpace.GreaterThan(big.NewInt(0)) {
al.alertMap[Name].alertString = fmt.Sprintf("Insufficient storage space for sealing sectors. Additional %s required.", humanize.Bytes(missingSpace.Uint64()))
}
}
// getAddresses retrieves machine details from the database and collects the set of unique config layers in use.
// For each unique layer it decodes the stored configuration and walks its Addresses entries,
// using addrMap to deduplicate control addresses and a separate slice for MinerAddresses.
// It returns the slice of unique control addresses and the slice of miner addresses.
func (al *alerts) getAddresses() ([]string, []string, error) {
// MachineDetails represents the structure of data received from the SQL query.
type machineDetail struct {
ID int
HostAndPort string
Layers string
}
var machineDetails []machineDetail
// Get all layers in use
err := al.db.Select(al.ctx, &machineDetails, `
SELECT m.id, m.host_and_port, d.layers
FROM harmony_machines m
LEFT JOIN harmony_machine_details d ON m.id = d.machine_id;`)
if err != nil {
return nil, nil, xerrors.Errorf("getting config layers for all machines: %w", err)
}
// UniqueLayers takes an array of MachineDetails and returns a slice of unique layers.
layerMap := make(map[string]bool)
var uniqueLayers []string
// Get unique layers in use
for _, machine := range machineDetails {
machine := machine
// Split the Layers field into individual layers
layers := strings.Split(machine.Layers, ",")
for _, layer := range layers {
layer = strings.TrimSpace(layer)
if _, exists := layerMap[layer]; !exists && layer != "" {
layerMap[layer] = true
uniqueLayers = append(uniqueLayers, layer)
}
}
}
addrMap := make(map[string]bool)
var uniqueAddrs []string
var miners []string
// Get all unique addresses
for _, layer := range uniqueLayers {
text := ""
cfg := config.DefaultCurioConfig()
err := al.db.QueryRow(al.ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
if err != nil {
if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
return nil, nil, xerrors.Errorf("missing layer '%s' ", layer)
}
return nil, nil, fmt.Errorf("could not read layer '%s': %w", layer, err)
}
_, err = toml.Decode(text, cfg)
if err != nil {
return nil, nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err)
}
for i := range cfg.Addresses {
prec := cfg.Addresses[i].PreCommitControl
com := cfg.Addresses[i].CommitControl
term := cfg.Addresses[i].TerminateControl
miner := cfg.Addresses[i].MinerAddresses
if prec != nil {
for j := range prec {
if _, ok := addrMap[prec[j]]; !ok && prec[j] != "" {
addrMap[prec[j]] = true
uniqueAddrs = append(uniqueAddrs, prec[j])
}
}
}
if com != nil {
for j := range com {
if _, ok := addrMap[com[j]]; !ok && com[j] != "" {
addrMap[com[j]] = true
uniqueAddrs = append(uniqueAddrs, com[j])
}
}
}
if term != nil {
for j := range term {
if _, ok := addrMap[term[j]]; !ok && term[j] != "" {
addrMap[term[j]] = true
uniqueAddrs = append(uniqueAddrs, term[j])
}
}
}
if miner != nil {
for j := range miner {
if _, ok := addrMap[miner[j]]; !ok && miner[j] != "" {
addrMap[miner[j]] = true
miners = append(miners, miner[j])
}
}
}
}
}
return uniqueAddrs, miners, nil
}
func wdPostCheck(al *alerts) {
Name := "WindowPost"
al.alertMap[Name] = &alertOut{}
head, err := al.api.ChainHead(al.ctx)
if err != nil {
al.alertMap[Name].err = err
return
}
from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1
if from < 0 {
from = 0
}
log.Infof("ALERTMANAGER: FROM: %d", from)
_, miners, err := al.getAddresses()
if err != nil {
al.alertMap[Name].err = err
return
}
h := head
type partSent struct {
sent bool
parts int
}
msgCheck := make(map[address.Address]map[uint64]*partSent)
for h.Height() >= from {
for _, minerStr := range miners {
maddr, err := address.NewFromString(minerStr)
if err != nil {
al.alertMap[Name].err = err
return
}
deadlineInfo, err := al.api.StateMinerProvingDeadline(al.ctx, maddr, h.Key())
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner deadline: %w", err)
return
}
partitions, err := al.api.StateMinerPartitions(al.ctx, maddr, deadlineInfo.Index, h.Key())
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner partitions: %w", err)
return
}
if _, ok := msgCheck[maddr]; !ok {
msgCheck[maddr] = make(map[uint64]*partSent)
}
if _, ok := msgCheck[maddr][deadlineInfo.Index]; !ok {
msgCheck[maddr][deadlineInfo.Index] = &partSent{
sent: false,
parts: len(partitions),
}
}
}
h, err = al.api.ChainGetTipSet(al.ctx, h.Parents())
if err != nil {
al.alertMap[Name].err = err
return
}
}
for maddr, deadlines := range msgCheck {
for deadlineIndex, ps := range deadlines {
log.Infof("ALERTMANAGER: Address: %s, DEADLINE: %d, Partitions: %d", maddr.String(), deadlineIndex, ps.parts)
}
}
var wdDetails []struct {
Miner int64 `db:"sp_id"`
Deadline int64 `db:"deadline"`
Partition int64 `db:"partition"`
Epoch abi.ChainEpoch `db:"submit_at_epoch"`
Proof []byte `db:"proof_params"`
}
err = al.db.Select(al.ctx, &wdDetails, `
SELECT sp_id, submit_at_epoch, proof_params, partition, deadline
FROM wdpost_proofs
WHERE submit_at_epoch > $1;`, from)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting windowPost details from database: %w", err)
return
}
if len(wdDetails) < 1 {
return
}
for _, detail := range wdDetails {
addr, err := address.NewIDAddress(uint64(detail.Miner))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting miner address: %w", err)
return
}
if _, ok := msgCheck[addr][uint64(detail.Deadline)]; !ok {
al.alertMap[Name].alertString += fmt.Sprintf("unknown WindowPost jobs for miner %s deadline %d partition %d found. ", addr.String(), detail.Deadline, detail.Partition)
continue
}
msgCheck[addr][uint64(detail.Deadline)].sent = true
var postOut miner.SubmitWindowedPoStParams
err = postOut.UnmarshalCBOR(bytes.NewReader(detail.Proof))
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("unmarshaling windowPost proof params: %w", err)
return
}
for i := range postOut.Partitions {
c, err := postOut.Partitions[i].Skipped.Count()
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting skipped sector count: %w", err)
return
}
if c > 0 {
al.alertMap[Name].alertString += fmt.Sprintf("Skipped %d sectors in deadline %d partition %d. ", c, postOut.Deadline, postOut.Partitions[i].Index)
}
}
}
for maddr, deadlines := range msgCheck {
for deadlineIndex, ps := range deadlines {
if !ps.sent {
al.alertMap[Name].alertString += fmt.Sprintf("No WindowPost jobs found for miner %s deadline %d. ", maddr.String(), deadlineIndex)
}
}
}
}
func wnPostCheck(al *alerts) {
Name := "WinningPost"
al.alertMap[Name] = &alertOut{}
head, err := al.api.ChainHead(al.ctx)
if err != nil {
al.alertMap[Name].err = err
return
}
from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1
if from < 0 {
from = 0
}
var wnDetails []struct {
Miner int64 `db:"sp_id"`
Block string `db:"mined_cid"`
Epoch abi.ChainEpoch `db:"epoch"`
}
err = al.db.Select(al.ctx, &wnDetails, `
SELECT sp_id, mined_cid, epoch
FROM mining_tasks
WHERE epoch > $1 AND won = TRUE
ORDER BY epoch;`, from)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting winningPost details from database: %w", err)
return
}
var count []int64
err = al.db.Select(al.ctx, &count, `
SELECT COUNT(*)
FROM mining_tasks
WHERE epoch > $1;`, from)
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting winningPost count details from database: %w", err)
return
}
if count[0] == 0 {
al.alertMap[Name].alertString += "No winningPost tasks found in the last " + humanize.Time(time.Now().Add(-AlertMangerInterval))
return
}
epochs := int64(math.Ceil(AlertMangerInterval.Seconds() / float64(build.BlockDelaySecs)))
if (head.Height() - abi.ChainEpoch(epochs)) < 0 {
epochs = int64(head.Height())
}
if epochs != count[0]+1 && epochs != count[0]-1 && epochs != count[0] {
al.alertMap[Name].alertString += fmt.Sprintf("Expected %d WinningPost task and found %d in DB ", epochs, count[0])
}
if len(wnDetails) < 1 {
return
}
to := wnDetails[len(wnDetails)-1].Epoch
epochMap := make(map[abi.ChainEpoch]string)
for head.Height() >= to {
epochMap[head.Height()] = head.String()
head, err = al.api.ChainGetTipSet(al.ctx, head.Parents())
if err != nil {
al.alertMap[Name].err = xerrors.Errorf("getting tipset: %w", err)
}
if head == nil {
al.alertMap[Name].err = xerrors.Errorf("tipset is nil")
return
}
if head.Height() == 0 {
break
}
}
winMap := make(map[abi.ChainEpoch]struct {
won bool
cid string
})
for _, wn := range wnDetails {
if strings.Contains(epochMap[wn.Epoch], wn.Block) {
winMap[wn.Epoch] = struct {
won bool
cid string
}{won: true, cid: wn.Block}
continue
}
winMap[wn.Epoch] = struct {
won bool
cid string
}{won: false, cid: wn.Block}
}
for epoch, st := range winMap {
if !st.won {
al.alertMap[Name].alertString += fmt.Sprintf("Epoch %d: does not contain our block %s", epoch, st.cid)
}
}
}

View File

@@ -1,234 +0,0 @@
// Nobody associated with this software's development has any business relationship with PagerDuty.
// This is provided as a convenient trampoline to the SP's alert system of choice.
package alertmanager
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
harmonytask2 "github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
const AlertMangerInterval = time.Hour
var log = logging.Logger("curio/alertmanager")
type AlertAPI interface {
ctladdr.NodeApi
ChainHead(context.Context) (*types.TipSet, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error)
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error)
}
type AlertTask struct {
api AlertAPI
cfg config.CurioAlerting
db *harmonydb.DB
}
type alertOut struct {
err error
alertString string
}
type alerts struct {
ctx context.Context
api AlertAPI
db *harmonydb.DB
cfg config.CurioAlerting
alertMap map[string]*alertOut
}
type pdPayload struct {
Summary string `json:"summary"`
Severity string `json:"severity"`
Source string `json:"source"`
Component string `json:"component,omitempty"`
Group string `json:"group,omitempty"`
Class string `json:"class,omitempty"`
CustomDetails interface{} `json:"custom_details,omitempty"`
}
type alertFunc func(al *alerts)
var alertFuncs = []alertFunc{
balanceCheck,
taskFailureCheck,
permanentStorageCheck,
wdPostCheck,
wnPostCheck,
}
func NewAlertTask(api AlertAPI, db *harmonydb.DB, alertingCfg config.CurioAlerting) *AlertTask {
return &AlertTask{
api: api,
db: db,
cfg: alertingCfg,
}
}
func (a *AlertTask) Do(taskID harmonytask2.TaskID, stillOwned func() bool) (done bool, err error) {
if a.cfg.PageDutyIntegrationKey == "" {
log.Warnf("PageDutyIntegrationKey is empty, not sending an alert")
return true, nil
}
ctx := context.Background()
alMap := make(map[string]*alertOut)
altrs := &alerts{
ctx: ctx,
api: a.api,
db: a.db,
cfg: a.cfg,
alertMap: alMap,
}
for _, al := range alertFuncs {
al(altrs)
}
details := make(map[string]interface{})
for k, v := range altrs.alertMap {
if v != nil {
if v.err != nil {
details[k] = v.err.Error()
continue
}
if v.alertString != "" {
details[k] = v.alertString
}
}
}
// Alert only if required
if len(details) > 0 {
payloadData := &pdPayload{
Summary: "Curio Alert",
Severity: "critical",
CustomDetails: details,
Source: "Curio Cluster",
}
err = a.sendAlert(payloadData)
if err != nil {
return false, err
}
}
return true, nil
}
func (a *AlertTask) CanAccept(ids []harmonytask2.TaskID, engine *harmonytask2.TaskEngine) (*harmonytask2.TaskID, error) {
id := ids[0]
return &id, nil
}
func (a *AlertTask) TypeDetails() harmonytask2.TaskTypeDetails {
return harmonytask2.TaskTypeDetails{
Max: 1,
Name: "AlertManager",
Cost: resources.Resources{
Cpu: 1,
Ram: 64 << 20,
Gpu: 0,
},
IAmBored: harmonytask2.SingletonTaskAdder(AlertMangerInterval, a),
}
}
func (a *AlertTask) Adder(taskFunc harmonytask2.AddTaskFunc) {
return
}
var _ harmonytask2.TaskInterface = &AlertTask{}
// sendAlert sends an alert to PagerDuty with the provided payload data.
// It creates a pdData struct with the provided routing key, event action and payload.
// It creates an HTTP POST request with the PagerDuty event URL as the endpoint and the marshaled JSON data as the request body.
// It sends the request using an HTTP client, retrying up to 5 times on network errors with a backoff before each retry.
// It handles different HTTP response status codes and returns an error based on the status code.
// If all retries fail, it returns an error indicating the last network error encountered.
func (a *AlertTask) sendAlert(data *pdPayload) error {
type pdData struct {
RoutingKey string `json:"routing_key"`
EventAction string `json:"event_action"`
Payload *pdPayload `json:"payload"`
}
payload := &pdData{
RoutingKey: a.cfg.PageDutyIntegrationKey,
EventAction: "trigger",
Payload: data,
}
jsonData, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("error marshaling JSON: %w", err)
}
req, err := http.NewRequest("POST", a.cfg.PagerDutyEventURL, bytes.NewBuffer(jsonData))
if err != nil {
return fmt.Errorf("error creating request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
var resp *http.Response
for i := 0; i < 5; i++ { // Maximum of 5 retries
resp, err = client.Do(req)
if err != nil {
time.Sleep(time.Duration(2*i) * time.Second) // linear backoff between retries
continue
}
defer func() { _ = resp.Body.Close() }()
switch resp.StatusCode {
case 202:
log.Debug("Accepted: The event has been accepted by PagerDuty.")
return nil
case 400:
bd, rerr := io.ReadAll(resp.Body)
if rerr != nil {
return xerrors.Errorf("Bad request: payload JSON is invalid. Failed to read the body: %w", rerr)
}
return xerrors.Errorf("Bad request: payload JSON is invalid %s", string(bd))
case 429:
log.Debug("Too many API calls, retrying after backoff...")
time.Sleep(time.Duration(5*i) * time.Second) // linear backoff before retrying
case 500, 501, 502, 503, 504:
log.Debug("Server error, retrying after backoff...")
time.Sleep(time.Duration(5*i) * time.Second) // linear backoff before retrying
default:
log.Errorw("Unexpected response from PagerDuty", "status", resp.Status)
return xerrors.Errorf("Unexpected HTTP response: %s", resp.Status)
}
}
return fmt.Errorf("after retries, last error: %w", err)
}
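
To make the PagerDuty wire format concrete, here is a standalone sketch (not from the removed sources) that marshals the same payload shape sendAlert builds. The struct definitions are trimmed copies of pdPayload and pdData above; the routing key and detail text are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the pdPayload / pdData shapes used by sendAlert above.
type pdPayload struct {
	Summary       string      `json:"summary"`
	Severity      string      `json:"severity"`
	Source        string      `json:"source"`
	CustomDetails interface{} `json:"custom_details,omitempty"`
}

type pdData struct {
	RoutingKey  string     `json:"routing_key"`
	EventAction string     `json:"event_action"`
	Payload     *pdPayload `json:"payload"`
}

func main() {
	// Placeholder integration key and a single example alert detail.
	body, err := json.MarshalIndent(&pdData{
		RoutingKey:  "placeholder-integration-key",
		EventAction: "trigger",
		Payload: &pdPayload{
			Summary:       "Curio Alert",
			Severity:      "critical",
			Source:        "Curio Cluster",
			CustomDetails: map[string]string{"Balance Check": "Wallet t0100 was not found in chain node."},
		},
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```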

View File

@@ -1,9 +0,0 @@
package build
// IsOpencl is set to the value of FFI_USE_OPENCL
var IsOpencl string
// Format: 8 HEX then underscore then ISO8601 date
// Ex: 4c5e98f28_2024-05-17T18:42:27-04:00
// NOTE: git date for repeatable builds.
var Commit string

View File

@@ -1,45 +0,0 @@
package curio
import (
"context"
"time"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/curiosrc/chainsched"
"github.com/filecoin-project/lotus/curiosrc/message"
"github.com/filecoin-project/lotus/curiosrc/multictladdr"
"github.com/filecoin-project/lotus/curiosrc/window"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
//var log = logging.Logger("provider")
func WindowPostScheduler(ctx context.Context, fc config.CurioFees, pc config.CurioProvingConfig,
api api.FullNode, verif storiface.Verifier, sender *message.Sender, chainSched *chainsched.CurioChainSched,
as *multictladdr.MultiAddressSelector, addresses map[dtypes.MinerAddress]bool, db *harmonydb.DB,
stor paths.Store, idx paths.SectorIndex, max int) (*window.WdPostTask, *window.WdPostSubmitTask, *window.WdPostRecoverDeclareTask, error) {
// todo config
ft := window.NewSimpleFaultTracker(stor, idx, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout), time.Duration(pc.PartitionCheckTimeout))
computeTask, err := window.NewWdPostTask(db, api, ft, stor, verif, chainSched, addresses, max, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout))
if err != nil {
return nil, nil, nil, err
}
submitTask, err := window.NewWdPostSubmitTask(chainSched, sender, db, api, fc.MaxWindowPoStGasFee, as)
if err != nil {
return nil, nil, nil, err
}
recoverTask, err := window.NewWdPostRecoverDeclareTask(sender, db, api, ft, as, chainSched, fc.MaxWindowPoStGasFee, addresses)
if err != nil {
return nil, nil, nil, err
}
return computeTask, submitTask, recoverTask, nil
}

View File

@@ -1,136 +0,0 @@
package chainsched
import (
"context"
"time"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
var log = logging.Logger("curio/chainsched")
type NodeAPI interface {
ChainHead(context.Context) (*types.TipSet, error)
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
}
type CurioChainSched struct {
api NodeAPI
callbacks []UpdateFunc
started bool
}
func New(api NodeAPI) *CurioChainSched {
return &CurioChainSched{
api: api,
}
}
type UpdateFunc func(ctx context.Context, revert, apply *types.TipSet) error
func (s *CurioChainSched) AddHandler(ch UpdateFunc) error {
if s.started {
return xerrors.Errorf("cannot add handler after start")
}
s.callbacks = append(s.callbacks, ch)
return nil
}
func (s *CurioChainSched) Run(ctx context.Context) {
s.started = true
var (
notifs <-chan []*api.HeadChange
err error
gotCur bool
)
// not fine to panic after this point
for {
if notifs == nil {
notifs, err = s.api.ChainNotify(ctx)
if err != nil {
log.Errorf("ChainNotify error: %+v", err)
build.Clock.Sleep(10 * time.Second)
continue
}
gotCur = false
log.Info("restarting chain scheduler")
}
select {
case changes, ok := <-notifs:
if !ok {
log.Warn("chain notifs channel closed")
notifs = nil
continue
}
if !gotCur {
if len(changes) != 1 {
log.Errorf("expected first notif to have len = 1")
continue
}
chg := changes[0]
if chg.Type != store.HCCurrent {
log.Errorf("expected first notif to tell current ts")
continue
}
ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange")
s.update(ctx, nil, chg.Val)
span.End()
gotCur = true
continue
}
ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange")
var lowest, highest *types.TipSet = nil, nil
for _, change := range changes {
if change.Val == nil {
log.Errorf("change.Val was nil")
}
switch change.Type {
case store.HCRevert:
lowest = change.Val
case store.HCApply:
highest = change.Val
}
}
s.update(ctx, lowest, highest)
span.End()
case <-ctx.Done():
return
}
}
}
func (s *CurioChainSched) update(ctx context.Context, revert, apply *types.TipSet) {
if apply == nil {
log.Error("no new tipset in CurioChainSched.update")
return
}
for _, ch := range s.callbacks {
if err := ch(ctx, revert, apply); err != nil {
log.Errorf("handling head updates in curio chain sched: %+v", err)
}
}
}
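
A brief, hypothetical usage sketch (not part of the removed code) showing how a task would hook into CurioChainSched: register an UpdateFunc with AddHandler before Run, then let the scheduler drive it on each head change. The fullNode argument is an assumed chainsched.NodeAPI implementation.

```go
package example

import (
	"context"

	logging "github.com/ipfs/go-log/v2"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/curiosrc/chainsched"
)

var log = logging.Logger("example")

// startChainWatcher is a hypothetical helper illustrating the AddHandler/Run flow.
func startChainWatcher(ctx context.Context, fullNode chainsched.NodeAPI) (*chainsched.CurioChainSched, error) {
	sched := chainsched.New(fullNode)

	// Handlers must be registered before Run is called.
	err := sched.AddHandler(func(ctx context.Context, revert, apply *types.TipSet) error {
		if apply != nil {
			log.Infow("new chain head", "height", apply.Height())
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	go sched.Run(ctx) // Run blocks until ctx is cancelled, so start it in a goroutine
	return sched, nil
}
```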

View File

@@ -1,249 +0,0 @@
package main
import (
"bufio"
"context"
"encoding/base64"
"errors"
"fmt"
"net"
"os"
"time"
"github.com/BurntSushi/toml"
"github.com/gbrlsnchs/jwt/v3"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/rpc"
"github.com/filecoin-project/lotus/curiosrc/deps"
)
const providerEnvVar = "CURIO_API_INFO"
var cliCmd = &cli.Command{
Name: "cli",
Usage: "Execute cli commands",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "machine",
Usage: "machine host:port (curio run --listen address)",
},
},
Before: func(cctx *cli.Context) error {
if os.Getenv(providerEnvVar) != "" {
// set already
return nil
}
if os.Getenv("LOTUS_DOCS_GENERATION") == "1" {
return nil
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
ctx := lcli.ReqContext(cctx)
machine := cctx.String("machine")
if machine == "" {
// interactive picker
var machines []struct {
HostAndPort string `db:"host_and_port"`
LastContact time.Time `db:"last_contact"`
}
err := db.Select(ctx, &machines, "select host_and_port, last_contact from harmony_machines")
if err != nil {
return xerrors.Errorf("getting machine list: %w", err)
}
now := time.Now()
fmt.Println("Available machines:")
for i, m := range machines {
// A machine is considered healthy if it was contacted within the last 2 minutes
healthStatus := "unhealthy"
if now.Sub(m.LastContact) <= 2*time.Minute {
healthStatus = "healthy"
}
fmt.Printf("%d. %s %s\n", i+1, m.HostAndPort, healthStatus)
}
fmt.Print("Select: ")
reader := bufio.NewReader(os.Stdin)
input, err := reader.ReadString('\n')
if err != nil {
return xerrors.Errorf("reading selection: %w", err)
}
var selection int
_, err = fmt.Sscanf(input, "%d", &selection)
if err != nil {
return xerrors.Errorf("parsing selection: %w", err)
}
if selection < 1 || selection > len(machines) {
return xerrors.New("invalid selection")
}
machine = machines[selection-1].HostAndPort
}
var apiKeys []string
{
var dbconfigs []struct {
Config string `db:"config"`
Title string `db:"title"`
}
err := db.Select(ctx, &dbconfigs, "select config from harmony_config")
if err != nil {
return xerrors.Errorf("getting configs: %w", err)
}
var seen = make(map[string]struct{})
for _, config := range dbconfigs {
var layer struct {
Apis struct {
StorageRPCSecret string
}
}
if _, err := toml.Decode(config.Config, &layer); err != nil {
return xerrors.Errorf("decode config layer %s: %w", config.Title, err)
}
if layer.Apis.StorageRPCSecret != "" {
if _, ok := seen[layer.Apis.StorageRPCSecret]; ok {
continue
}
seen[layer.Apis.StorageRPCSecret] = struct{}{}
apiKeys = append(apiKeys, layer.Apis.StorageRPCSecret)
}
}
}
if len(apiKeys) == 0 {
return xerrors.New("no api keys found in the database")
}
if len(apiKeys) > 1 {
return xerrors.Errorf("multiple api keys found in the database, not supported yet")
}
var apiToken []byte
{
type jwtPayload struct {
Allow []auth.Permission
}
p := jwtPayload{
Allow: api.AllPermissions,
}
sk, err := base64.StdEncoding.DecodeString(apiKeys[0])
if err != nil {
return xerrors.Errorf("decode secret: %w", err)
}
apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk))
if err != nil {
return xerrors.Errorf("signing token: %w", err)
}
}
{
laddr, err := net.ResolveTCPAddr("tcp", machine)
if err != nil {
return xerrors.Errorf("net resolve: %w", err)
}
if len(laddr.IP) == 0 {
// set localhost
laddr.IP = net.IPv4(127, 0, 0, 1)
}
ma, err := manet.FromNetAddr(laddr)
if err != nil {
return xerrors.Errorf("net from addr (%v): %w", laddr, err)
}
token := fmt.Sprintf("%s:%s", string(apiToken), ma)
if err := os.Setenv(providerEnvVar, token); err != nil {
return xerrors.Errorf("setting env var: %w", err)
}
}
{
api, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
v, err := api.Version(ctx)
if err != nil {
return xerrors.Errorf("querying version: %w", err)
}
fmt.Println("remote node version:", v.String())
}
return nil
},
Subcommands: []*cli.Command{
storageCmd,
logCmd,
waitApiCmd,
},
}
var waitApiCmd = &cli.Command{
Name: "wait-api",
Usage: "Wait for Curio api to come online",
Flags: []cli.Flag{
&cli.DurationFlag{
Name: "timeout",
Usage: "duration to wait till fail",
Value: time.Second * 30,
},
},
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout"))
defer cancel()
for {
if ctx.Err() != nil {
break
}
api, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
fmt.Printf("Not online yet... (%s)\n", err)
time.Sleep(time.Second)
continue
}
defer closer()
_, err = api.Version(ctx)
if err != nil {
return err
}
return nil
}
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
return fmt.Errorf("timed out waiting for api to come online")
}
return ctx.Err()
},
}

View File

@@ -1,441 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"strings"
"github.com/BurntSushi/toml"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
)
var configCmd = &cli.Command{
Name: "config",
Usage: "Manage node config by layers. The layer 'base' will always be applied at Curio start-up.",
Subcommands: []*cli.Command{
configDefaultCmd,
configSetCmd,
configGetCmd,
configListCmd,
configViewCmd,
configRmCmd,
configEditCmd,
configNewCmd,
},
}
var configDefaultCmd = &cli.Command{
Name: "default",
Aliases: []string{"defaults"},
Usage: "Print default node config",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-comment",
Usage: "don't comment default values",
},
},
Action: func(cctx *cli.Context) error {
comment := !cctx.Bool("no-comment")
cfg, err := deps.GetDefaultConfig(comment)
if err != nil {
return err
}
fmt.Print(cfg)
return nil
},
}
var configSetCmd = &cli.Command{
Name: "set",
Aliases: []string{"add", "update", "create"},
Usage: "Set a config layer or the base by providing a filename or stdin.",
ArgsUsage: "a layer's file name",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "title",
Usage: "title of the config layer (req'd for stdin)",
},
},
Action: func(cctx *cli.Context) error {
args := cctx.Args()
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
name := cctx.String("title")
var stream io.Reader = os.Stdin
if args.Len() != 1 {
if cctx.String("title") == "" {
return errors.New("must have a title for stdin, or a file name")
}
} else {
stream, err = os.Open(args.First())
if err != nil {
return fmt.Errorf("cannot open file %s: %w", args.First(), err)
}
if name == "" {
name = strings.Split(path.Base(args.First()), ".")[0]
}
}
bytes, err := io.ReadAll(stream)
if err != nil {
return fmt.Errorf("cannot read stream/file %w", err)
}
curioConfig := config.DefaultCurioConfig() // ensure it's toml
_, err = deps.LoadConfigWithUpgrades(string(bytes), curioConfig)
if err != nil {
return fmt.Errorf("cannot decode file: %w", err)
}
_ = curioConfig
err = setConfig(db, name, string(bytes))
if err != nil {
return fmt.Errorf("unable to save config layer: %w", err)
}
fmt.Println("Layer " + name + " created/updated")
return nil
},
}
func setConfig(db *harmonydb.DB, name, config string) error {
_, err := db.Exec(context.Background(),
`INSERT INTO harmony_config (title, config) VALUES ($1, $2)
ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, config)
return err
}
var configGetCmd = &cli.Command{
Name: "get",
Aliases: []string{"cat", "show"},
Usage: "Get a config layer by name. You may want to pipe the output to a file, or use 'less'",
ArgsUsage: "layer name",
Action: func(cctx *cli.Context) error {
args := cctx.Args()
if args.Len() != 1 {
return fmt.Errorf("want 1 layer arg, got %d", args.Len())
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
cfg, err := getConfig(db, args.First())
if err != nil {
return err
}
fmt.Println(cfg)
return nil
},
}
func getConfig(db *harmonydb.DB, layer string) (string, error) {
var cfg string
err := db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&cfg)
if err != nil {
return "", err
}
return cfg, nil
}
var configListCmd = &cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List config layers present in the DB.",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
var res []string
err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`)
if err != nil {
return fmt.Errorf("unable to read from db: %w", err)
}
for _, r := range res {
fmt.Println(r)
}
return nil
},
}
var configRmCmd = &cli.Command{
Name: "remove",
Aliases: []string{"rm", "del", "delete"},
Usage: "Remove a named config layer.",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
args := cctx.Args()
if args.Len() != 1 {
return errors.New("must have exactly 1 arg for the layer name")
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First())
if err != nil {
return fmt.Errorf("unable to delete from db: %w", err)
}
if ct == 0 {
return fmt.Errorf("no layer named %s", args.First())
}
return nil
},
}
var configViewCmd = &cli.Command{
Name: "interpret",
Aliases: []string{"view", "stacked", "stack"},
Usage: "Interpret stacked config layers by this version of curio, with system-generated comments.",
ArgsUsage: "a list of layers to be interpreted as the final config",
Flags: []cli.Flag{
&cli.StringSliceFlag{
Name: "layers",
Usage: "comma or space separated list of layers to be interpreted (base is always applied)",
Required: true,
},
},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
layers := cctx.StringSlice("layers")
curioConfig, err := deps.GetConfig(cctx.Context, layers, db)
if err != nil {
return err
}
cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return xerrors.Errorf("cannot interpret config: %w", err)
}
fmt.Println(string(cb))
return nil
},
}
var configEditCmd = &cli.Command{
Name: "edit",
Usage: "edit a config layer",
ArgsUsage: "[layer name]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "editor",
Usage: "editor to use",
Value: "vim",
EnvVars: []string{"EDITOR"},
},
&cli.StringFlag{
Name: "source",
Usage: "source config layer",
DefaultText: "<edited layer>",
},
&cli.BoolFlag{
Name: "allow-overwrite",
Usage: "allow overwrite of existing layer if source is a different layer",
},
&cli.BoolFlag{
Name: "no-source-diff",
Usage: "save the whole config into the layer, not just the diff",
},
&cli.BoolFlag{
Name: "no-interpret-source",
Usage: "do not interpret source layer",
DefaultText: "true if --source is set",
},
},
Action: func(cctx *cli.Context) error {
layer := cctx.Args().First()
if layer == "" {
return errors.New("layer name is required")
}
source := layer
if cctx.IsSet("source") {
source = cctx.String("source")
if source == layer && !cctx.Bool("allow-overwrite") {
return errors.New("source and target layers are the same")
}
}
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
sourceConfig, err := getConfig(db, source)
if err != nil {
return xerrors.Errorf("getting source config: %w", err)
}
if cctx.IsSet("source") && source != layer && !cctx.Bool("no-interpret-source") {
curioCfg := config.DefaultCurioConfig()
if _, err := toml.Decode(sourceConfig, curioCfg); err != nil {
return xerrors.Errorf("parsing source config: %w", err)
}
cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return xerrors.Errorf("interpreting source config: %w", err)
}
sourceConfig = string(cb)
}
editor := cctx.String("editor")
newConfig, err := edit(editor, sourceConfig)
if err != nil {
return xerrors.Errorf("editing config: %w", err)
}
toWrite := newConfig
if cctx.IsSet("source") && !cctx.Bool("no-source-diff") {
updated, err := diff(sourceConfig, newConfig)
if err != nil {
return xerrors.Errorf("computing diff: %w", err)
}
{
fmt.Printf("%s will write changes as the layer because %s is not set\n", color.YellowString(">"), color.GreenString("--no-source-diff"))
fmt.Println(updated)
fmt.Printf("%s Confirm [y]: ", color.YellowString(">"))
for {
var confirmBuf [16]byte
n, err := os.Stdin.Read(confirmBuf[:])
if err != nil {
return xerrors.Errorf("reading confirmation: %w", err)
}
confirm := strings.TrimSpace(string(confirmBuf[:n]))
if confirm == "" {
confirm = "y"
}
if confirm[:1] == "y" {
break
}
if confirm[:1] == "n" {
return nil
}
fmt.Printf("%s Confirm [y]:\n", color.YellowString(">"))
}
}
toWrite = updated
}
fmt.Printf("%s Writing config for layer %s\n", color.YellowString(">"), color.GreenString(layer))
return setConfig(db, layer, toWrite)
},
}
func diff(sourceConf, newConf string) (string, error) {
fromSrc := config.DefaultCurioConfig()
fromNew := config.DefaultCurioConfig()
_, err := toml.Decode(sourceConf, fromSrc)
if err != nil {
return "", xerrors.Errorf("decoding source config: %w", err)
}
_, err = toml.Decode(newConf, fromNew)
if err != nil {
return "", xerrors.Errorf("decoding new config: %w", err)
}
cb, err := config.ConfigUpdate(fromNew, fromSrc, config.Commented(true), config.NoEnv())
if err != nil {
return "", xerrors.Errorf("interpreting source config: %w", err)
}
lines := strings.Split(string(cb), "\n")
var outLines []string
var categoryBuf string
for _, line := range lines {
// drop empty lines
if strings.TrimSpace(line) == "" {
continue
}
// drop lines starting with '#'
if strings.HasPrefix(strings.TrimSpace(line), "#") {
continue
}
// if starting with [, it's a category
if strings.HasPrefix(strings.TrimSpace(line), "[") {
categoryBuf = line
continue
}
if categoryBuf != "" {
outLines = append(outLines, categoryBuf)
categoryBuf = ""
}
outLines = append(outLines, line)
}
return strings.Join(outLines, "\n"), nil
}
func edit(editor, cfg string) (string, error) {
file, err := os.CreateTemp("", "curio-config-*.toml")
if err != nil {
return "", err
}
_, err = file.WriteString(cfg)
if err != nil {
return "", err
}
filePath := file.Name()
if err := file.Close(); err != nil {
return "", err
}
defer func() {
_ = os.Remove(filePath)
}()
cmd := exec.Command(editor, filePath)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
return "", err
}
data, err := os.ReadFile(filePath)
if err != nil {
return "", err
}
return string(data), err
}

View File

@@ -1,57 +0,0 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/node/repo"
)
var configNewCmd = &cli.Command{
Name: "new-cluster",
Usage: "Create new configuration for a new cluster",
ArgsUsage: "[SP actor address...]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) error {
if cctx.Args().Len() < 1 {
return xerrors.New("must specify at least one SP actor address. Use 'lotus-shed miner create' or use 'curio guided-setup'")
}
ctx := cctx.Context
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
full, closer, err := cliutil.GetFullNodeAPIV1(cctx)
if err != nil {
return xerrors.Errorf("connecting to full node: %w", err)
}
defer closer()
ainfo, err := cliutil.GetAPIInfo(cctx, repo.FullNode)
if err != nil {
return xerrors.Errorf("could not get API info for FullNode: %w", err)
}
token, err := full.AuthNew(ctx, api.AllPermissions)
if err != nil {
return err
}
return deps.CreateMinerConfig(ctx, full, db, cctx.Args().Slice(), fmt.Sprintf("%s:%s", string(token), ainfo.Addr))
},
}

View File

@@ -1,438 +0,0 @@
package main
import (
"reflect"
"testing"
"time"
"github.com/invopop/jsonschema"
"github.com/samber/lo"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/node/config"
)
var baseText = `
[Subsystems]
# EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster
# with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
# machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
# will allow for parallel processing of partitions.
#
# It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
# the need for additional machines. In setups like this it is generally recommended to run
# partitionsPerDeadline+1 machines.
#
# type: bool
#EnableWindowPost = false
# type: int
#WindowPostMaxTasks = 0
# EnableWinningPost enables winning post to be executed on this curio instance.
# Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
# It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
# documentation.
#
# type: bool
#EnableWinningPost = false
# type: int
#WinningPostMaxTasks = 0
# EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching
# pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is
# only applicable when integrating with boost, and should be enabled on nodes which will hold deal data
# from boost until sectors containing the related pieces have the TreeD/TreeR constructed.
# Note that future Curio implementations will have a separate task type for fetching pieces from the internet.
#
# type: bool
#EnableParkPiece = false
# type: int
#ParkPieceMaxTasks = 0
# EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
# creating 11 layer files in sector cache directory.
#
# SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
# unsealed data (CommD), sector number, miner id, and the seal proof type.
# Its outputs are the 11 layer files in the sector cache directory.
#
# In lotus-miner this was run as part of PreCommit1.
#
# type: bool
#EnableSealSDR = false
# The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#SealSDRMaxTasks = 0
# EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
# This task handles encoding of unsealed data into last sdr layer and building
# of TreeR, TreeC and TreeD.
#
# This task runs after SDR
# TreeD is first computed with optional input of unsealed data
# TreeR is computed from replica, which is first computed as field
# addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
# TreeC is computed from the 11 SDR layers
# The 3 trees will later be used to compute the PoRep proof.
#
# In case of SyntheticPoRep challenges for PoRep will be pre-generated at this step, and trees and layers
# will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk)
# then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
# saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step)
#
# In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
# Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
# which just remove unneeded tree data after PoRep is computed.
#
# type: bool
#EnableSealSDRTrees = false
# The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#SealSDRTreesMaxTasks = 0
# FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
# The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
# machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
# Finalize will run in parallel with the SubmitCommitMsg task.
#
# type: int
#FinalizeMaxTasks = 0
# EnableSendPrecommitMsg enables the sending of precommit messages to the chain
# from this curio instance.
# This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message
#
# type: bool
#EnableSendPrecommitMsg = false
# EnablePoRepProof enables the computation of the porep proof
#
# This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the
# precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
# requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees
# task.
#
# In lotus-miner this was Commit1 / Commit2
#
# type: bool
#EnablePoRepProof = false
# The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine.
#
# type: int
#PoRepProofMaxTasks = 0
# EnableSendCommitMsg enables the sending of commit messages to the chain
# from this curio instance.
#
# type: bool
#EnableSendCommitMsg = false
# EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
# This tasks should only be enabled on nodes with long-term storage.
#
# The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
# SDRTrees machine into long-term storage. This task runs after the Finalize task.
#
# type: bool
#EnableMoveStorage = false
# The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
# also be bounded by resources available on the machine. It is recommended that this value is set to a number which
# uses all available network (or disk) bandwidth on the machine without causing bottlenecks.
#
# type: int
#MoveStorageMaxTasks = 0
# EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should
# only need to be run on a single machine in the cluster.
#
# type: bool
#EnableWebGui = false
# The address that should listen for Web GUI requests.
#
# type: string
#GuiAddress = ":4701"
[Fees]
# type: types.FIL
#DefaultMaxFee = "0.07 FIL"
# type: types.FIL
#MaxPreCommitGasFee = "0.025 FIL"
# type: types.FIL
#MaxCommitGasFee = "0.05 FIL"
# type: types.FIL
#MaxTerminateGasFee = "0.5 FIL"
# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
#MaxWindowPoStGasFee = "5 FIL"
# type: types.FIL
#MaxPublishDealsFee = "0.05 FIL"
[Fees.MaxPreCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.02 FIL"
[Fees.MaxCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.03 FIL"
[[Addresses]]
#PreCommitControl = []
#CommitControl = []
#TerminateControl = []
#DisableOwnerFallback = false
#DisableWorkerFallback = false
MinerAddresses = ["t01013"]
[[Addresses]]
#PreCommitControl = []
#CommitControl = []
#TerminateControl = []
#DisableOwnerFallback = false
#DisableWorkerFallback = false
#MinerAddresses = []
[[Addresses]]
#PreCommitControl = []
#CommitControl = []
#TerminateControl = []
#DisableOwnerFallback = false
#DisableWorkerFallback = false
MinerAddresses = ["t01006"]
[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
#ParallelCheckLimit = 32
# Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
#SingleCheckTimeout = "10m0s"
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because reading the
# test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
#PartitionCheckTimeout = "20m0s"
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableBuiltinWindowPoSt = false
# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
#DisableBuiltinWinningPoSt = false
# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
# can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
# the builtin logic not skipping snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
# sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableWDPoStPreChecks = false
# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
# //
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
#MaxPartitionsPerPoStMessage = 0
# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (eg 1);
# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have lower gas limit)
#
# type: int
#MaxPartitionsPerRecoveryMessage = 0
# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
# with recovering sectors in the post message
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent,
# to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
#
# type: bool
#SingleRecoveringPartitionPerPostMessage = false
[Journal]
# Events of the form: "system1:event1,system1:event2[,...]"
#
# type: string
#DisabledEvents = ""
[Apis]
# ChainApiInfo is the API endpoint for the Lotus daemon.
#
# type: []string
ChainApiInfo = ["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.T_jmG4DTs9Zjd7rr78862lT7D2U63uz-zqcUKHwcqaU:/dns/localhost/tcp/1234/http"]
# RPC Secret for the storage subsystem.
# If integrating with lotus-miner this must match the value from
# cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey
#
# type: string
StorageRPCSecret = "HxHe8YLHiY0LjHVw/WT/4XQkPGgRyCEYk+xiFi0Ob0o="
`
func TestConfig(t *testing.T) {
baseCfg := config.DefaultCurioConfig()
addr1 := config.CurioAddresses{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{"t3qroiebizgkz7pvj26reg5r5mqiftrt5hjdske2jzjmlacqr2qj7ytjncreih2mvujxoypwpfusmwpipvxncq"},
DisableOwnerFallback: false,
DisableWorkerFallback: false,
MinerAddresses: []string{"t01000"},
}
addr2 := config.CurioAddresses{
MinerAddresses: []string{"t01001"},
}
_, err := deps.LoadConfigWithUpgrades(baseText, baseCfg)
require.NoError(t, err)
baseCfg.Addresses = append(baseCfg.Addresses, addr1)
baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
_, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
require.NoError(t, err)
baseCfg.Addresses = append(baseCfg.Addresses, addr2)
baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
_, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
require.NoError(t, err)
}
func TestCustomConfigDurationJson(t *testing.T) {
ref := new(jsonschema.Reflector)
ref.Mapper = func(i reflect.Type) *jsonschema.Schema {
if i == reflect.TypeOf(config.Duration(time.Second)) {
return &jsonschema.Schema{
Type: "string",
Format: "duration",
}
}
return nil
}
sch := ref.Reflect(config.CurioConfig{})
definitions := sch.Definitions["CurioProvingConfig"]
prop, ok := definitions.Properties.Get("SingleCheckTimeout")
require.True(t, ok)
require.Equal(t, "string", prop.Type)
}

View File

@ -1,71 +0,0 @@
package main
import (
"encoding/gob"
"fmt"
"os"
"reflect"
"github.com/ipfs/go-cid"
"github.com/samber/lo"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/ffiselect"
"github.com/filecoin-project/lotus/curiosrc/ffiselect/ffidirect"
"github.com/filecoin-project/lotus/lib/must"
)
var ffiCmd = &cli.Command{
Name: "ffi",
Hidden: true,
Flags: []cli.Flag{
layersFlag,
},
Action: func(cctx *cli.Context) (err error) {
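// The call to perform arrives gob-encoded on stdin; the result (or any error
// from a panic) is gob-encoded back to the parent process on file descriptor 3.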
output := os.NewFile(uintptr(3), "out")
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("panic: %v", r)
}
if err != nil {
err = gob.NewEncoder(output).Encode(ffiselect.ValErr{Val: nil, Err: err.Error()})
if err != nil {
panic(err)
}
}
}()
var callInfo ffiselect.FFICall
if err := gob.NewDecoder(os.Stdin).Decode(&callInfo); err != nil {
return xerrors.Errorf("ffi subprocess can not decode: %w", err)
}
args := lo.Map(callInfo.Args, func(arg any, i int) reflect.Value {
return reflect.ValueOf(arg)
})
resAry := reflect.ValueOf(ffidirect.FFI{}).MethodByName(callInfo.Fn).Call(args)
res := lo.Map(resAry, func(res reflect.Value, i int) any {
return res.Interface()
})
err = gob.NewEncoder(output).Encode(ffiselect.ValErr{Val: res, Err: ""})
if err != nil {
return xerrors.Errorf("ffi subprocess can not encode: %w", err)
}
return output.Close()
},
}
func ffiSelfTest() {
val1, val2 := 12345678, must.One(cid.Parse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi"))
ret1, ret2, err := ffiselect.FFISelect{}.SelfTest(val1, val2)
if err != nil {
panic("ffi self test failed:" + err.Error())
}
if ret1 != val1 || !val2.Equals(ret2) {
panic(fmt.Sprint("ffi self test failed: values do not match: ", val1, val2, ret1, ret2))
}
}

View File

@ -1,896 +0,0 @@
// guidedSetup for migration from lotus-miner to Curio
//
// IF STRINGS CHANGED {
// follow instructions at ../internal/translations/translations.go
// }
package guidedsetup
import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"math/bits"
"net/http"
"os"
"os/signal"
"path"
"reflect"
"strconv"
"strings"
"syscall"
"time"
"github.com/BurntSushi/toml"
"github.com/charmbracelet/lipgloss"
"github.com/docker/go-units"
"github.com/manifoldco/promptui"
"github.com/mitchellh/go-homedir"
"github.com/samber/lo"
"github.com/urfave/cli/v2"
"golang.org/x/text/language"
"golang.org/x/text/message"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cli/spcli"
cliutil "github.com/filecoin-project/lotus/cli/util"
_ "github.com/filecoin-project/lotus/curiosrc/cmd/curio/internal/translations"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/repo"
)
// URL to upload user-selected fields to help direct developer's focus.
const DeveloperFocusRequestURL = "https://curiostorage.org/cgi-bin/savedata.php"
var GuidedsetupCmd = &cli.Command{
Name: "guided-setup",
Usage: "Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner",
Flags: []cli.Flag{
&cli.StringFlag{ // for cliutil.GetFullNodeAPI
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) (err error) {
T, say := SetupLanguage()
setupCtrlC(say)
// Run the migration steps
migrationData := MigrationData{
T: T,
say: say,
selectTemplates: &promptui.SelectTemplates{
Help: T("Use the arrow keys to navigate: ↓ ↑ → ← "),
},
cctx: cctx,
ctx: cctx.Context,
}
newOrMigrate(&migrationData)
if migrationData.init {
say(header, "This interactive tool creates a new miner actor and creates the basic configuration layer for it.")
say(notice, "This process is partially idempotent. If a new miner actor has been created but subsequent steps fail, the user needs to run 'curio config new-cluster < miner ID >' to finish the configuration.")
for _, step := range newMinerSteps {
step(&migrationData)
}
} else {
say(header, "This interactive tool migrates lotus-miner to Curio in 5 minutes.")
say(notice, "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.")
for _, step := range migrationSteps {
step(&migrationData)
}
}
for _, closer := range migrationData.closers {
closer()
}
return nil
},
}
func setupCtrlC(say func(style lipgloss.Style, key message.Reference, a ...interface{})) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
say(notice, "Ctrl+C pressed in Terminal")
os.Exit(2)
}()
}
var (
header = lipgloss.NewStyle().
Align(lipgloss.Left).
Foreground(lipgloss.Color("#00FF00")).
Background(lipgloss.Color("#242424")).
BorderStyle(lipgloss.NormalBorder()).
Width(60).Margin(1)
notice = lipgloss.NewStyle().
Align(lipgloss.Left).
Bold(true).
Foreground(lipgloss.Color("#CCCCCC")).
Background(lipgloss.Color("#333300")).MarginBottom(1)
green = lipgloss.NewStyle().
Align(lipgloss.Left).
Foreground(lipgloss.Color("#00FF00")).
Background(lipgloss.Color("#000000"))
plain = lipgloss.NewStyle().Align(lipgloss.Left)
section = lipgloss.NewStyle().
Align(lipgloss.Left).
Foreground(lipgloss.Color("#000000")).
Background(lipgloss.Color("#FFFFFF")).
Underline(true)
code = lipgloss.NewStyle().
Align(lipgloss.Left).
Foreground(lipgloss.Color("#00FF00")).
Background(lipgloss.Color("#f8f9fa"))
)
func SetupLanguage() (func(key message.Reference, a ...interface{}) string, func(style lipgloss.Style, key message.Reference, a ...interface{})) {
langText := "en"
problem := false
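// Pick the catalog language from the first two letters of $LANG (e.g. "en" from
// "en_US.UTF-8"); anything unparsable or without a catalog falls back to English.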
if len(os.Getenv("LANG")) > 1 {
langText = os.Getenv("LANG")[:2]
} else {
problem = true
}
lang, err := language.Parse(langText)
if err != nil {
lang = language.English
problem = true
fmt.Println("Error parsing language")
}
langs := message.DefaultCatalog.Languages()
have := lo.SliceToMap(langs, func(t language.Tag) (string, bool) { return t.String(), true })
if _, ok := have[lang.String()]; !ok {
lang = language.English
problem = true
}
if problem {
_ = os.Setenv("LANG", "en-US") // for later users of this function
fmt.Println(notice.Copy().AlignHorizontal(lipgloss.Right).
Render("$LANG=" + langText + " unsupported. Available: " + strings.Join(lo.Keys(have), ", ")))
fmt.Println("Defaulting to English. Please reach out to the Curio team if you would like to have additional language support.")
}
return func(key message.Reference, a ...interface{}) string {
return message.NewPrinter(lang).Sprintf(key, a...)
}, func(sty lipgloss.Style, key message.Reference, a ...interface{}) {
msg := message.NewPrinter(lang).Sprintf(key, a...)
fmt.Println(sty.Render(msg))
}
}
func newOrMigrate(d *MigrationData) {
i, _, err := (&promptui.Select{
Label: d.T("I want to:"),
Items: []string{
d.T("Migrate from existing Lotus-Miner"),
d.T("Create a new miner")},
Templates: d.selectTemplates,
}).Run()
if err != nil {
d.say(notice, "Aborting remaining steps.", err.Error())
os.Exit(1)
}
if i == 1 {
d.init = true
}
}
type migrationStep func(*MigrationData)
var migrationSteps = []migrationStep{
readMinerConfig, // Tells them to be on the miner machine
yugabyteConnect, // Miner is updated
configToDB, // work on base configuration migration.
verifySectors, // Verify the sectors are in the database
doc,
oneLastThing,
complete,
}
type newMinerStep func(data *MigrationData)
var newMinerSteps = []newMinerStep{
stepPresteps,
stepCreateActor,
stepNewMinerConfig,
doc,
oneLastThing,
completeInit,
}
type MigrationData struct {
T func(key message.Reference, a ...interface{}) string
say func(style lipgloss.Style, key message.Reference, a ...interface{})
selectTemplates *promptui.SelectTemplates
MinerConfigPath string
MinerConfig *config.StorageMiner
DB *harmonydb.DB
MinerID address.Address
full v1api.FullNode
cctx *cli.Context
closers []jsonrpc.ClientCloser
ctx context.Context
owner address.Address
worker address.Address
sender address.Address
ssize abi.SectorSize
confidence uint64
init bool
}
func complete(d *MigrationData) {
stepCompleted(d, d.T("Lotus-Miner to Curio Migration."))
d.say(plain, "Try the web interface with %s for further guided improvements.", code.Render("curio run --layers=gui"))
d.say(plain, "You can now migrate your market node (%s), if applicable.", "Boost")
}
func completeInit(d *MigrationData) {
stepCompleted(d, d.T("New Miner initialization complete."))
d.say(plain, "Try the web interface with %s for further guided improvements.", code.Render("curio run --layers=gui"))
}
func configToDB(d *MigrationData) {
d.say(section, "Migrating lotus-miner config.toml to Curio in-database configuration.")
{
var closer jsonrpc.ClientCloser
var err error
d.full, closer, err = cliutil.GetFullNodeAPIV1(d.cctx)
d.closers = append(d.closers, closer)
if err != nil {
d.say(notice, "Error getting API: %s", err.Error())
os.Exit(1)
}
}
ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode)
if err != nil {
d.say(notice, "could not get API info for FullNode: %s", err.Error())
os.Exit(1)
}
token, err := d.full.AuthNew(context.Background(), api.AllPermissions)
if err != nil {
d.say(notice, "Error getting token: %s", err.Error())
os.Exit(1)
}
chainApiInfo := fmt.Sprintf("%s:%s", string(token), ainfo.Addr)
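// shouldErrPrompt asks whether to continue when unmigratable sectors are found;
// choosing "No, abort" returns true and stops the migration.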
shouldErrPrompt := func() bool {
i, _, err := (&promptui.Select{
Label: d.T("Unmigratable sectors found. Do you want to continue?"),
Items: []string{
d.T("Yes, continue"),
d.T("No, abort")},
Templates: d.selectTemplates,
}).Run()
if err != nil {
d.say(notice, "Aborting migration.", err.Error())
os.Exit(1)
}
return i == 1
}
d.MinerID, err = SaveConfigToLayerMigrateSectors(d.MinerConfigPath, chainApiInfo, shouldErrPrompt)
if err != nil {
d.say(notice, "Error saving config to layer: %s. Aborting Migration", err.Error())
os.Exit(1)
}
}
// bucket returns the power's 4 highest bits (rounded down).
func bucket(power *api.MinerPower) uint64 {
rawQAP := power.TotalPower.QualityAdjPower.Uint64()
magnitude := lo.Max([]int{bits.Len64(rawQAP), 5})
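// magnitude is the bit length of the raw QA power, floored at 5 so the 4-bit shift below never underflows.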
// shifting erases resolution so we cannot distinguish SPs of similar scales.
shift := uint64(magnitude) - 4
return rawQAP >> shift << shift
}
type uploadType int
const uploadTypeIndividual uploadType = 0
const uploadTypeAggregate uploadType = 1
// const uploadTypeHint uploadType = 2
const uploadTypeNothing uploadType = 3
func oneLastThing(d *MigrationData) {
d.say(section, "The Curio team wants to improve the software you use. Tell the team you're using `%s`.", "curio")
i, _, err := (&promptui.Select{
Label: d.T("Select what you want to share with the Curio team."),
Items: []string{
d.T("Individual Data: Miner ID, Curio version, chain (%s or %s). Signed.", "mainnet", "calibration"),
d.T("Aggregate-Anonymous: version, chain, and Miner power (bucketed)."),
d.T("Hint: I am someone running Curio on whichever chain."),
d.T("Nothing.")},
Templates: d.selectTemplates,
}).Run()
preference := uploadType(i)
if err != nil {
d.say(notice, "Aborting remaining steps.", err.Error())
os.Exit(1)
}
if preference != uploadTypeNothing {
msgMap := map[string]any{
"domain": "curio-newuser",
"net": build.BuildTypeString(),
}
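// Extend the payload according to the level of detail the user agreed to share;
// individual reports are additionally signed with the miner's worker key.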
if preference == uploadTypeIndividual || preference == uploadTypeAggregate {
// articles of incorporation
power, err := d.full.StateMinerPower(context.Background(), d.MinerID, types.EmptyTSK)
if err != nil {
d.say(notice, "Error getting miner power: %s", err.Error())
os.Exit(1)
}
msgMap["version"] = build.BuildVersion
msgMap["net"] = build.BuildType
msgMap["power"] = map[uploadType]uint64{
uploadTypeIndividual: power.MinerPower.QualityAdjPower.Uint64(),
uploadTypeAggregate: bucket(power)}[preference]
if preference == uploadTypeIndividual { // Sign it
msgMap["miner_id"] = d.MinerID
msg, err := json.Marshal(msgMap)
if err != nil {
d.say(notice, "Error marshalling message: %s", err.Error())
os.Exit(1)
}
mi, err := d.full.StateMinerInfo(context.Background(), d.MinerID, types.EmptyTSK)
if err != nil {
d.say(notice, "Error getting miner info: %s", err.Error())
os.Exit(1)
}
sig, err := d.full.WalletSign(context.Background(), mi.Worker, msg)
if err != nil {
d.say(notice, "Error signing message: %s", err.Error())
os.Exit(1)
}
msgMap["signature"] = base64.StdEncoding.EncodeToString(sig.Data)
}
}
msg, err := json.Marshal(msgMap)
if err != nil {
d.say(notice, "Error marshalling message: %s", err.Error())
os.Exit(1)
}
resp, err := http.DefaultClient.Post(DeveloperFocusRequestURL, "application/json", bytes.NewReader(msg))
if err != nil {
d.say(notice, "Error sending message: %s", err.Error())
}
if resp != nil {
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != 200 {
b, err := io.ReadAll(resp.Body)
if err == nil {
d.say(notice, "Error sending message: Status %s, Message: ", resp.Status, string(b))
}
} else {
stepCompleted(d, d.T("Message sent."))
}
}
}
}
func doc(d *MigrationData) {
d.say(plain, "Documentation: ")
d.say(plain, "The '%s' layer stores common configuration. All curio instances can include it in their %s argument.", "base", "--layers")
d.say(plain, "You can add other layers for per-machine configuration changes.")
d.say(plain, "Filecoin %s channels: %s and %s", "Slack", "#fil-curio-help", "#fil-curio-dev")
d.say(plain, "Increase reliability using redundancy: start multiple machines with at least the post layer: 'curio run --layers=post'")
//d.say(plain, "Point your browser to your web GUI to complete setup with %s and advanced features.", "Boost")
d.say(plain, "One database can serve multiple miner IDs: Run a migration for each lotus-miner.")
}
func verifySectors(d *MigrationData) {
var i []int
var lastError string
fmt.Println()
d.say(section, "Please start (or restart) %s now that database credentials are in %s.", "lotus-miner", "config.toml")
d.say(notice, "Waiting for %s to write sectors into Yugabyte.", "lotus-miner")
mid, err := address.IDFromAddress(d.MinerID)
if err != nil {
d.say(notice, "Error interpreting miner ID: %s: ID: %s", err.Error(), d.MinerID.String())
os.Exit(1)
}
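// Poll the sector_location table every 5 seconds until lotus-miner has recorded
// at least one sector location for this miner ID.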
for {
err := d.DB.Select(context.Background(), &i, `
SELECT count(*) FROM sector_location WHERE miner_id=$1`, mid)
if err != nil {
if err.Error() != lastError {
d.say(notice, "Error verifying sectors: %s", err.Error())
lastError = err.Error()
}
continue
}
if i[0] > 0 {
break
}
fmt.Print(".")
time.Sleep(5 * time.Second)
}
d.say(plain, "The sectors are in the database. The database is ready for %s.", "Curio")
d.say(notice, "Now shut down lotus-miner and lotus-worker and run %s instead.", code.Render("curio run"))
_, err = (&promptui.Prompt{Label: d.T("Press return to continue")}).Run()
if err != nil {
d.say(notice, "Aborting migration.")
os.Exit(1)
}
stepCompleted(d, d.T("Sectors verified. %d sector locations found.", i[0]))
}
func yugabyteConnect(d *MigrationData) {
harmonyCfg := config.DefaultStorageMiner().HarmonyDB //copy the config to a local variable
if d.MinerConfig != nil {
harmonyCfg = d.MinerConfig.HarmonyDB //copy the config to a local variable
}
var err error
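// Try the HarmonyDB credentials from the miner's config first; if the connection
// fails, prompt for database details interactively.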
d.DB, err = harmonydb.NewFromConfig(harmonyCfg)
if err != nil {
hcfg := getDBDetails(d)
harmonyCfg = *hcfg
}
d.say(plain, "Connected to Yugabyte. Schema is current.")
if !reflect.DeepEqual(harmonyCfg, d.MinerConfig.HarmonyDB) || !d.MinerConfig.Subsystems.EnableSectorIndexDB {
d.MinerConfig.HarmonyDB = harmonyCfg
d.MinerConfig.Subsystems.EnableSectorIndexDB = true
d.say(plain, "Enabling Sector Indexing in the database.")
buf, err := config.ConfigUpdate(d.MinerConfig, config.DefaultStorageMiner())
if err != nil {
d.say(notice, "Error encoding config.toml: %s", err.Error())
os.Exit(1)
}
_, err = (&promptui.Prompt{
Label: d.T("Press return to update %s with Yugabyte info. A Backup file will be written to that folder before changes are made.", "config.toml")}).Run()
if err != nil {
os.Exit(1)
}
p, err := homedir.Expand(d.MinerConfigPath)
if err != nil {
d.say(notice, "Error expanding path: %s", err.Error())
os.Exit(1)
}
tomlPath := path.Join(p, "config.toml")
stat, err := os.Stat(tomlPath)
if err != nil {
d.say(notice, "Error reading filemode of config.toml: %s", err.Error())
os.Exit(1)
}
fBackup, err := os.CreateTemp(p, "config-backup-*.toml")
if err != nil {
d.say(notice, "Error creating backup file: %s", err.Error())
os.Exit(1)
}
fBackupContents, err := os.ReadFile(tomlPath)
if err != nil {
d.say(notice, "Error reading config.toml: %s", err.Error())
os.Exit(1)
}
_, err = fBackup.Write(fBackupContents)
if err != nil {
d.say(notice, "Error writing backup file: %s", err.Error())
os.Exit(1)
}
err = fBackup.Close()
if err != nil {
d.say(notice, "Error closing backup file: %s", err.Error())
os.Exit(1)
}
filemode := stat.Mode()
err = os.WriteFile(path.Join(p, "config.toml"), buf, filemode)
if err != nil {
d.say(notice, "Error writing config.toml: %s", err.Error())
os.Exit(1)
}
d.say(section, "Restart Lotus Miner. ")
}
stepCompleted(d, d.T("Connected to Yugabyte"))
}
func readMinerConfig(d *MigrationData) {
d.say(plain, "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.")
verifyPath := func(dir string) (*config.StorageMiner, error) {
cfg := config.DefaultStorageMiner()
dir, err := homedir.Expand(dir)
if err != nil {
return nil, err
}
_, err = toml.DecodeFile(path.Join(dir, "config.toml"), &cfg)
return cfg, err
}
dirs := map[string]*config.StorageMiner{"~/.lotusminer": nil, "~/.lotus-miner-local-net": nil}
if v := os.Getenv("LOTUS_MINER_PATH"); v != "" {
dirs[v] = nil
}
for dir := range dirs {
cfg, err := verifyPath(dir)
if err != nil {
delete(dirs, dir)
}
dirs[dir] = cfg
}
var otherPath bool
if len(dirs) > 0 {
_, str, err := (&promptui.Select{
Label: d.T("Select the location of your lotus-miner config directory?"),
Items: append(lo.Keys(dirs), d.T("Other")),
Templates: d.selectTemplates,
}).Run()
if err != nil {
if err.Error() == "^C" {
os.Exit(1)
}
otherPath = true
} else {
if str == d.T("Other") {
otherPath = true
} else {
d.MinerConfigPath = str
d.MinerConfig = dirs[str]
}
}
}
if otherPath {
minerPathEntry:
str, err := (&promptui.Prompt{
Label: d.T("Enter the path to the configuration directory used by %s", "lotus-miner"),
}).Run()
if err != nil {
d.say(notice, "No path provided, abandoning migration ")
os.Exit(1)
}
cfg, err := verifyPath(str)
if err != nil {
d.say(notice, "Cannot read the config.toml file in the provided directory, Error: %s", err.Error())
goto minerPathEntry
}
d.MinerConfigPath = str
d.MinerConfig = cfg
}
// Try to lock Miner repo to verify that lotus-miner is not running
{
r, err := repo.NewFS(d.MinerConfigPath)
if err != nil {
d.say(plain, "Could not create repo from directory: %s. Aborting migration", err.Error())
os.Exit(1)
}
lr, err := r.Lock(repo.StorageMiner)
if err != nil {
d.say(plain, "Could not lock miner repo. Your miner must be stopped: %s\n Aborting migration", err.Error())
os.Exit(1)
}
_ = lr.Close()
}
stepCompleted(d, d.T("Read Miner Config"))
}
func stepCompleted(d *MigrationData, step string) {
fmt.Print(green.Render("✔ "))
d.say(plain, "Step Complete: %s\n", step)
}
func stepCreateActor(d *MigrationData) {
d.say(plain, "Initializing a new miner actor.")
for {
i, _, err := (&promptui.Select{
Label: d.T("Enter the info to create a new miner"),
Items: []string{
d.T("Owner Address: %s", d.owner.String()),
d.T("Worker Address: %s", d.worker.String()),
d.T("Sender Address: %s", d.sender.String()),
d.T("Sector Size: %d", d.ssize),
d.T("Confidence epochs: %d", d.confidence),
d.T("Continue to verify the addresses and create a new miner actor.")},
Size: 6,
Templates: d.selectTemplates,
}).Run()
if err != nil {
d.say(notice, "Miner creation error occurred: %s ", err.Error())
os.Exit(1)
}
switch i {
case 0:
owner, err := (&promptui.Prompt{
Label: d.T("Enter the owner address"),
}).Run()
if err != nil {
d.say(notice, "No address provided")
continue
}
ownerAddr, err := address.NewFromString(owner)
if err != nil {
d.say(notice, "Failed to parse the address: %s", err.Error())
}
d.owner = ownerAddr
case 1, 2:
val, err := (&promptui.Prompt{
Label: d.T("Enter %s address", []string{"worker", "sender"}[i-1]),
Default: d.owner.String(),
}).Run()
if err != nil {
d.say(notice, err.Error())
continue
}
addr, err := address.NewFromString(val)
if err != nil {
d.say(notice, "Failed to parse the address: %s", err.Error())
}
switch i {
case 1:
d.worker = addr
case 2:
d.sender = addr
}
continue
case 3:
val, err := (&promptui.Prompt{
Label: d.T("Enter the sector size"),
}).Run()
if err != nil {
d.say(notice, "No value provided")
continue
}
sectorSize, err := units.RAMInBytes(val)
if err != nil {
d.say(notice, "Failed to parse sector size: %s", err.Error())
continue
}
d.ssize = abi.SectorSize(sectorSize)
continue
case 4:
confidenceStr, err := (&promptui.Prompt{
Label: d.T("Confidence epochs"),
Default: strconv.Itoa(5),
}).Run()
if err != nil {
d.say(notice, err.Error())
continue
}
confidence, err := strconv.ParseUint(confidenceStr, 10, 64)
if err != nil {
d.say(notice, "Failed to parse confidence: %s", err.Error())
continue
}
d.confidence = confidence
goto minerInit // break out of the for loop once we have all the values
}
}
minerInit:
miner, err := spcli.CreateStorageMiner(d.ctx, d.full, d.owner, d.worker, d.sender, d.ssize, d.confidence)
if err != nil {
d.say(notice, "Failed to create the miner actor: %s", err.Error())
os.Exit(1)
}
d.MinerID = miner
stepCompleted(d, d.T("Miner %s created successfully", miner.String()))
}
func stepPresteps(d *MigrationData) {
// Setup and connect to YugabyteDB
_ = getDBDetails(d)
// Verify HarmonyDB connection
var titles []string
err := d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
if err != nil {
d.say(notice, "Cannot reach the DB: %s", err.Error())
os.Exit(1)
}
// Get full node API
full, closer, err := cliutil.GetFullNodeAPIV1(d.cctx)
if err != nil {
d.say(notice, "Error connecting to full node API: %s", err.Error())
os.Exit(1)
}
d.full = full
d.closers = append(d.closers, closer)
stepCompleted(d, d.T("Pre-initialization steps complete"))
}
func stepNewMinerConfig(d *MigrationData) {
curioCfg := config.DefaultCurioConfig()
curioCfg.Addresses = append(curioCfg.Addresses, config.CurioAddresses{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{},
DisableOwnerFallback: false,
DisableWorkerFallback: false,
MinerAddresses: []string{d.MinerID.String()},
})
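// Generate a fresh 32-byte secret for the storage RPC shared by the cluster.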
sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32))
if err != nil {
d.say(notice, "Failed to generate random bytes for secret: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk)
ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode)
if err != nil {
d.say(notice, "Failed to get API info for FullNode: %w", err)
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
token, err := d.full.AuthNew(d.ctx, api.AllPermissions)
if err != nil {
d.say(notice, "Failed to verify the auth token from daemon node: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, fmt.Sprintf("%s:%s", string(token), ainfo.Addr))
// write config
var titles []string
err = d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
if err != nil {
d.say(notice, "Cannot reach the DB: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
// If 'base' layer is not present
if !lo.Contains(titles, "base") {
curioCfg.Addresses = lo.Filter(curioCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
d.say(notice, "Failed to generate default config: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
_, err = d.DB.Exec(d.ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", string(cb))
if err != nil {
d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address"))
return
}
// If base layer is already present
baseCfg := config.DefaultCurioConfig()
var baseText string
err = d.DB.QueryRow(d.ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
if err != nil {
d.say(notice, "Failed to load base config from database: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
_, err = deps.LoadConfigWithUpgrades(baseText, baseCfg)
if err != nil {
d.say(notice, "Failed to parse base config: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses...)
baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
d.say(notice, "Failed to regenerate base config: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
_, err = d.DB.Exec(d.ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb))
if err != nil {
d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error())
d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String())
os.Exit(1)
}
stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address"))
}
func getDBDetails(d *MigrationData) *config.HarmonyDB {
harmonyCfg := config.DefaultStorageMiner().HarmonyDB
for {
i, _, err := (&promptui.Select{
Label: d.T("Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)"),
Items: []string{
d.T("Host: %s", strings.Join(harmonyCfg.Hosts, ",")),
d.T("Port: %s", harmonyCfg.Port),
d.T("Username: %s", harmonyCfg.Username),
d.T("Password: %s", harmonyCfg.Password),
d.T("Database: %s", harmonyCfg.Database),
d.T("Continue to connect and update schema.")},
Size: 6,
Templates: d.selectTemplates,
}).Run()
if err != nil {
d.say(notice, "Database config error occurred, abandoning migration: %s ", err.Error())
os.Exit(1)
}
switch i {
case 0:
host, err := (&promptui.Prompt{
Label: d.T("Enter the Yugabyte database host(s)"),
}).Run()
if err != nil {
d.say(notice, "No host provided")
continue
}
harmonyCfg.Hosts = strings.Split(host, ",")
case 1, 2, 3, 4:
val, err := (&promptui.Prompt{
Label: d.T("Enter the Yugabyte database %s", []string{"port", "username", "password", "database"}[i-1]),
}).Run()
if err != nil {
d.say(notice, "No value provided")
continue
}
switch i {
case 1:
harmonyCfg.Port = val
case 2:
harmonyCfg.Username = val
case 3:
harmonyCfg.Password = val
case 4:
harmonyCfg.Database = val
}
continue
case 5:
db, err := harmonydb.NewFromConfig(harmonyCfg)
if err != nil {
if err.Error() == "^C" {
os.Exit(1)
}
d.say(notice, "Error connecting to Yugabyte database: %s", err.Error())
continue
}
d.DB = db
return &harmonyCfg
}
}
}

View File

@ -1,430 +0,0 @@
package guidedsetup
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"github.com/BurntSushi/toml"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/samber/lo"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-statestore"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/must"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/repo"
sealing "github.com/filecoin-project/lotus/storage/pipeline"
)
const (
FlagMinerRepo = "miner-repo"
)
const FlagMinerRepoDeprecation = "storagerepo"
func SaveConfigToLayerMigrateSectors(minerRepoPath, chainApiInfo string, unmigSectorShouldFail func() bool) (minerAddress address.Address, err error) {
_, say := SetupLanguage()
ctx := context.Background()
r, err := repo.NewFS(minerRepoPath)
if err != nil {
return minerAddress, err
}
ok, err := r.Exists()
if err != nil {
return minerAddress, err
}
if !ok {
return minerAddress, fmt.Errorf("repo not initialized at: %s", minerRepoPath)
}
lr, err := r.LockRO(repo.StorageMiner)
if err != nil {
return minerAddress, fmt.Errorf("locking repo: %w", err)
}
defer func() {
err = lr.Close()
if err != nil {
fmt.Println("error closing repo: ", err)
}
}()
cfgNode, err := lr.Config()
if err != nil {
return minerAddress, fmt.Errorf("getting node config: %w", err)
}
smCfg := cfgNode.(*config.StorageMiner)
db, err := harmonydb.NewFromConfig(smCfg.HarmonyDB)
if err != nil {
return minerAddress, fmt.Errorf("could not reach the database. Ensure the Miner config toml's HarmonyDB entry"+
" is set up to reach Yugabyte correctly: %w", err)
}
var titles []string
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
if err != nil {
return minerAddress, fmt.Errorf("miner cannot reach the db. Ensure the config toml's HarmonyDB entry"+
" is set up to reach Yugabyte correctly: %s", err.Error())
}
// Copy over identical settings:
buf, err := os.ReadFile(path.Join(lr.Path(), "config.toml"))
if err != nil {
return minerAddress, fmt.Errorf("could not read config.toml: %w", err)
}
curioCfg := config.DefaultCurioConfig()
ensureEmptyArrays(curioCfg)
_, err = deps.LoadConfigWithUpgrades(string(buf), curioCfg)
if err != nil {
return minerAddress, fmt.Errorf("could not decode toml: %w", err)
}
// Populate Miner Address
mmeta, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return minerAddress, xerrors.Errorf("opening miner metadata datastore: %w", err)
}
maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address"))
if err != nil {
return minerAddress, xerrors.Errorf("getting miner address datastore entry: %w", err)
}
addr, err := address.NewFromBytes(maddrBytes)
if err != nil {
return minerAddress, xerrors.Errorf("parsing miner actor address: %w", err)
}
if err := MigrateSectors(ctx, addr, mmeta, db, func(nSectors int) {
say(plain, "Migrating metadata for %d sectors.", nSectors)
}, unmigSectorShouldFail); err != nil {
return address.Address{}, xerrors.Errorf("migrating sectors: %w", err)
}
minerAddress = addr
curioCfg.Addresses = []config.CurioAddresses{{
MinerAddresses: []string{addr.String()},
PreCommitControl: smCfg.Addresses.PreCommitControl,
CommitControl: smCfg.Addresses.CommitControl,
TerminateControl: smCfg.Addresses.TerminateControl,
DisableOwnerFallback: smCfg.Addresses.DisableOwnerFallback,
DisableWorkerFallback: smCfg.Addresses.DisableWorkerFallback,
}}
ks, err := lr.KeyStore()
if err != nil {
return minerAddress, xerrors.Errorf("keystore err: %w", err)
}
js, err := ks.Get(modules.JWTSecretName)
if err != nil {
return minerAddress, xerrors.Errorf("error getting JWTSecretName: %w", err)
}
curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey)
curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, chainApiInfo)
// Express as configTOML
configTOML := &bytes.Buffer{}
if err = toml.NewEncoder(configTOML).Encode(curioCfg); err != nil {
return minerAddress, err
}
if lo.Contains(titles, "base") {
// append addresses
var baseCfg = config.DefaultCurioConfig()
var baseText string
err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
if err != nil {
return minerAddress, xerrors.Errorf("Cannot load base config: %w", err)
}
ensureEmptyArrays(baseCfg)
_, err := deps.LoadConfigWithUpgrades(baseText, baseCfg)
if err != nil {
return minerAddress, xerrors.Errorf("Cannot load base config: %w", err)
}
for _, addr := range baseCfg.Addresses {
if lo.Contains(addr.MinerAddresses, curioCfg.Addresses[0].MinerAddresses[0]) {
goto skipWritingToBase
}
}
// write to base
{
baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses[0])
baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
if baseCfg.Apis.ChainApiInfo == nil {
baseCfg.Apis.ChainApiInfo = append(baseCfg.Apis.ChainApiInfo, chainApiInfo)
}
if baseCfg.Apis.StorageRPCSecret == "" {
baseCfg.Apis.StorageRPCSecret = curioCfg.Apis.StorageRPCSecret
}
cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return minerAddress, xerrors.Errorf("cannot interpret config: %w", err)
}
_, err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb))
if err != nil {
return minerAddress, xerrors.Errorf("cannot update base config: %w", err)
}
say(plain, "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.", minerAddress)
}
say(plain, "Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", "base", "mig-"+curioCfg.Addresses[0].MinerAddresses[0])
skipWritingToBase:
} else {
_, err = db.Exec(ctx, `INSERT INTO harmony_config (title, config) VALUES ('base', $1)
ON CONFLICT(title) DO UPDATE SET config=EXCLUDED.config`, configTOML)
if err != nil {
return minerAddress, xerrors.Errorf("Cannot insert base config: %w", err)
}
say(notice, "Configuration 'base' was created to resemble this lotus-miner's config.toml .")
}
{ // make a layer representing the migration
layerName := fmt.Sprintf("mig-%s", curioCfg.Addresses[0].MinerAddresses[0])
_, err = db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", layerName)
if err != nil {
return minerAddress, xerrors.Errorf("Cannot delete existing layer: %w", err)
}
_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", layerName, configTOML.String())
if err != nil {
return minerAddress, xerrors.Errorf("Cannot insert layer after layer created message: %w", err)
}
say(plain, "Layer %s created. ", layerName)
}
dbSettings := getDBSettings(*smCfg)
say(plain, "To work with the config: ")
fmt.Println(code.Render(`curio ` + dbSettings + ` config edit base`))
say(plain, `To run Curio: With machine or cgroup isolation, use the command (with example layer selection):`)
fmt.Println(code.Render(`curio ` + dbSettings + ` run --layer=post`))
return minerAddress, nil
}
func getDBSettings(smCfg config.StorageMiner) string {
dbSettings := ""
def := config.DefaultStorageMiner().HarmonyDB
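// Emit only the --db-* flags whose values differ from the defaults, keeping the
// printed example commands as short as possible.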
if def.Hosts[0] != smCfg.HarmonyDB.Hosts[0] {
dbSettings += ` --db-host="` + strings.Join(smCfg.HarmonyDB.Hosts, ",") + `"`
}
if def.Port != smCfg.HarmonyDB.Port {
dbSettings += " --db-port=" + smCfg.HarmonyDB.Port
}
if def.Username != smCfg.HarmonyDB.Username {
dbSettings += ` --db-user="` + smCfg.HarmonyDB.Username + `"`
}
if def.Password != smCfg.HarmonyDB.Password {
dbSettings += ` --db-password="` + smCfg.HarmonyDB.Password + `"`
}
if def.Database != smCfg.HarmonyDB.Database {
dbSettings += ` --db-name="` + smCfg.HarmonyDB.Database + `"`
}
return dbSettings
}
func ensureEmptyArrays(cfg *config.CurioConfig) {
if cfg.Addresses == nil {
cfg.Addresses = []config.CurioAddresses{}
} else {
for i := range cfg.Addresses {
if cfg.Addresses[i].PreCommitControl == nil {
cfg.Addresses[i].PreCommitControl = []string{}
}
if cfg.Addresses[i].CommitControl == nil {
cfg.Addresses[i].CommitControl = []string{}
}
if cfg.Addresses[i].TerminateControl == nil {
cfg.Addresses[i].TerminateControl = []string{}
}
}
}
if cfg.Apis.ChainApiInfo == nil {
cfg.Apis.ChainApiInfo = []string{}
}
}
func cidPtrToStrptr(c *cid.Cid) *string {
if c == nil {
return nil
}
s := c.String()
return &s
}
func coalescePtrs[A any](a, b *A) *A {
if a != nil {
return a
}
return b
}
func MigrateSectors(ctx context.Context, maddr address.Address, mmeta datastore.Batching, db *harmonydb.DB, logMig func(int), unmigSectorShouldFail func() bool) error {
mid, err := address.IDFromAddress(maddr)
if err != nil {
return xerrors.Errorf("getting miner ID: %w", err)
}
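// Sector metadata lives in the lotus-miner metadata datastore under the sealing
// prefix; list every SectorInfo so it can be copied into HarmonyDB.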
sts := statestore.New(namespace.Wrap(mmeta, datastore.NewKey(sealing.SectorStorePrefix)))
var sectors []sealing.SectorInfo
if err := sts.List(&sectors); err != nil {
return xerrors.Errorf("getting sector list: %w", err)
}
logMig(len(sectors))
migratableState := func(state sealing.SectorState) bool {
switch state {
case sealing.Proving, sealing.Available, sealing.Removed:
return true
default:
return false
}
}
unmigratable := map[sealing.SectorState]int{}
for _, sector := range sectors {
if !migratableState(sector.State) {
unmigratable[sector.State]++
continue
}
}
if len(unmigratable) > 0 {
fmt.Println("The following sector states are not migratable:")
for state, count := range unmigratable {
fmt.Printf(" %s: %d\n", state, count)
}
if unmigSectorShouldFail() {
return xerrors.Errorf("aborting migration because sectors were found that are not migratable.")
}
}
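// Copy each migratable sector into sectors_meta and sectors_meta_pieces, skipping
// Removed sectors; the upserts make it safe to re-run the migration.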
for _, sector := range sectors {
if !migratableState(sector.State) || sector.State == sealing.Removed {
continue
}
// Insert sector metadata
_, err := db.Exec(ctx, `
INSERT INTO sectors_meta (sp_id, sector_num, reg_seal_proof, ticket_epoch, ticket_value,
orig_sealed_cid, orig_unsealed_cid, cur_sealed_cid, cur_unsealed_cid,
msg_cid_precommit, msg_cid_commit, msg_cid_update, seed_epoch, seed_value)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
ON CONFLICT (sp_id, sector_num) DO UPDATE
SET reg_seal_proof = excluded.reg_seal_proof, ticket_epoch = excluded.ticket_epoch, ticket_value = excluded.ticket_value,
orig_sealed_cid = excluded.orig_sealed_cid, orig_unsealed_cid = excluded.orig_unsealed_cid, cur_sealed_cid = excluded.cur_sealed_cid,
cur_unsealed_cid = excluded.cur_unsealed_cid, msg_cid_precommit = excluded.msg_cid_precommit, msg_cid_commit = excluded.msg_cid_commit,
msg_cid_update = excluded.msg_cid_update, seed_epoch = excluded.seed_epoch, seed_value = excluded.seed_value`,
mid,
sector.SectorNumber,
sector.SectorType,
sector.TicketEpoch,
sector.TicketValue,
cidPtrToStrptr(sector.CommR),
cidPtrToStrptr(sector.CommD),
cidPtrToStrptr(coalescePtrs(sector.UpdateSealed, sector.CommR)),
cidPtrToStrptr(coalescePtrs(sector.UpdateUnsealed, sector.CommD)),
cidPtrToStrptr(sector.PreCommitMessage),
cidPtrToStrptr(sector.CommitMessage),
cidPtrToStrptr(sector.ReplicaUpdateMessage),
sector.SeedEpoch,
sector.SeedValue,
)
if err != nil {
b, _ := json.MarshalIndent(sector, "", " ")
fmt.Println(string(b))
return xerrors.Errorf("inserting/updating sectors_meta for sector %d: %w", sector.SectorNumber, err)
}
// Process each piece within the sector
for j, piece := range sector.Pieces {
dealID := int64(0)
startEpoch := int64(0)
endEpoch := int64(0)
var pamJSON *string
if piece.HasDealInfo() {
dealInfo := piece.DealInfo()
if dealInfo.Impl().DealProposal != nil {
dealID = int64(dealInfo.Impl().DealID)
}
startEpoch = int64(must.One(dealInfo.StartEpoch()))
endEpoch = int64(must.One(dealInfo.EndEpoch()))
if piece.Impl().PieceActivationManifest != nil {
pam, err := json.Marshal(piece.Impl().PieceActivationManifest)
if err != nil {
return xerrors.Errorf("error marshalling JSON for piece %d in sector %d: %w", j, sector.SectorNumber, err)
}
ps := string(pam)
pamJSON = &ps
}
}
// Splitting the SQL statement for readability and adding new fields
_, err = db.Exec(ctx, `
INSERT INTO sectors_meta_pieces (
sp_id, sector_num, piece_num, piece_cid, piece_size,
requested_keep_data, raw_data_size, start_epoch, orig_end_epoch,
f05_deal_id, ddo_pam
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
ON CONFLICT (sp_id, sector_num, piece_num) DO UPDATE
SET
piece_cid = excluded.piece_cid,
piece_size = excluded.piece_size,
requested_keep_data = excluded.requested_keep_data,
raw_data_size = excluded.raw_data_size,
start_epoch = excluded.start_epoch,
orig_end_epoch = excluded.orig_end_epoch,
f05_deal_id = excluded.f05_deal_id,
ddo_pam = excluded.ddo_pam`,
mid,
sector.SectorNumber,
j,
piece.PieceCID(),
piece.Piece().Size,
piece.HasDealInfo(),
nil, // raw_data_size might be calculated based on the piece size, or retrieved if available
startEpoch,
endEpoch,
dealID,
pamJSON,
)
if err != nil {
b, _ := json.MarshalIndent(sector, "", " ")
fmt.Println(string(b))
return xerrors.Errorf("inserting/updating sector_meta_pieces for sector %d, piece %d: %w", sector.SectorNumber, j, err)
}
}
}
return nil
}

View File

@ -1,486 +0,0 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package translations
import (
"golang.org/x/text/language"
"golang.org/x/text/message"
"golang.org/x/text/message/catalog"
)
type dictionary struct {
index []uint32
data string
}
func (d *dictionary) Lookup(key string) (data string, ok bool) {
p, ok := messageKeyToIndex[key]
if !ok {
return "", false
}
start, end := d.index[p], d.index[p+1]
if start == end {
return "", false
}
return d.data[start:end], true
}
func init() {
dict := map[string]catalog.Dictionary{
"en": &dictionary{index: enIndex, data: enData},
"ko": &dictionary{index: koIndex, data: koData},
"zh": &dictionary{index: zhIndex, data: zhData},
}
fallback := language.MustParse("en")
cat, err := catalog.NewFromMap(dict, catalog.Fallback(fallback))
if err != nil {
panic(err)
}
message.DefaultCatalog = cat
}
var messageKeyToIndex = map[string]int{
"Aborting migration.": 21,
"Aborting remaining steps.": 9,
"Aggregate-Anonymous: version, chain, and Miner power (bucketed).": 26,
"Cannot reach the DB: %s": 93,
"Cannot read the config.toml file in the provided directory, Error: %s": 68,
"Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.": 120,
"Confidence epochs": 89,
"Confidence epochs: %d": 79,
"Configuration 'base' was created to resemble this lotus-miner's config.toml .": 121,
"Configuration 'base' was updated to include this miner's address": 102,
"Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.": 119,
"Connected to Yugabyte": 62,
"Connected to Yugabyte. Schema is current.": 50,
"Continue to connect and update schema.": 112,
"Continue to verify the addresses and create a new miner actor.": 80,
"Could not create repo from directory: %s. Aborting migration": 69,
"Could not lock miner repo. Your miner must be stopped: %s\n Aborting migration": 70,
"Create a new miner": 8,
"Ctrl+C pressed in Terminal": 5,
"Database config error occurred, abandoning migration: %s ": 113,
"Database: %s": 111,
"Documentation: ": 36,
"Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.": 4,
"Enabling Sector Indexing in the database.": 51,
"Enter %s address": 85,
"Enter the Yugabyte database %s": 116,
"Enter the Yugabyte database host(s)": 114,
"Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)": 106,
"Enter the info to create a new miner": 74,
"Enter the owner address": 82,
"Enter the path to the configuration directory used by %s": 66,
"Enter the sector size": 86,
"Error closing backup file: %s": 59,
"Error connecting to Yugabyte database: %s": 117,
"Error connecting to full node API: %s": 94,
"Error creating backup file: %s": 56,
"Error encoding config.toml: %s": 52,
"Error expanding path: %s": 54,
"Error getting API: %s": 15,
"Error getting miner info: %s": 31,
"Error getting miner power: %s": 29,
"Error getting token: %s": 17,
"Error interpreting miner ID: %s: ID: %s": 44,
"Error marshalling message: %s": 30,
"Error reading config.toml: %s": 57,
"Error reading filemode of config.toml: %s": 55,
"Error saving config to layer: %s. Aborting Migration": 22,
"Error sending message: %s": 33,
"Error sending message: Status %s, Message: ": 34,
"Error signing message: %s": 32,
"Error verifying sectors: %s": 45,
"Error writing backup file: %s": 58,
"Error writing config.toml: %s": 60,
"Failed to create the miner actor: %s": 91,
"Failed to generate default config: %s": 100,
"Failed to generate random bytes for secret: %s": 96,
"Failed to get API info for FullNode: %w": 98,
"Failed to insert 'base' config layer in database: %s": 101,
"Failed to load base config from database: %s": 103,
"Failed to parse base config: %s": 104,
"Failed to parse confidence: %s": 90,
"Failed to parse sector size: %s": 88,
"Failed to parse the address: %s": 84,
"Failed to regenerate base config: %s": 105,
"Failed to verify the auth token from daemon node: %s": 99,
"Filecoin %s channels: %s and %s": 39,
"Hint: I am someone running Curio on whichever chain.": 27,
"Host: %s": 107,
"I want to:": 6,
"Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'": 40,
"Individual Data: Miner ID, Curio version, chain (%s or %s). Signed.": 25,
"Initializing a new miner actor.": 73,
"Layer %s created. ": 122,
"Lotus-Miner to Curio Migration.": 10,
"Message sent.": 35,
"Migrate from existing Lotus-Miner": 7,
"Migrating lotus-miner config.toml to Curio in-database configuration.": 14,
"Migrating metadata for %d sectors.": 118,
"Miner %s created successfully": 92,
"Miner creation error occurred: %s ": 81,
"New Miner initialization complete.": 13,
"No address provided": 83,
"No host provided": 115,
"No path provided, abandoning migration ": 67,
"No value provided": 87,
"No, abort": 20,
"Nothing.": 28,
"Now shut down lotus-miner and lotus-worker and use run %s instead.": 47,
"One database can serve multiple miner IDs: Run a migration for each lotus-miner.": 41,
"Other": 65,
"Owner Address: %s": 75,
"Password: %s": 110,
"Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration": 97,
"Please start (or restart) %s now that database credentials are in %s.": 42,
"Port: %s": 108,
"Pre-initialization steps complete": 95,
"Press return to continue": 48,
"Press return to update %s with Yugabyte info. A Backup file will be written to that folder before changes are made.": 53,
"Read Miner Config": 71,
"Restart Lotus Miner. ": 61,
"Sector Size: %d": 78,
"Sectors verified. %d sector locations found.": 49,
"Select the location of your lotus-miner config directory?": 64,
"Select what you want to share with the Curio team.": 24,
"Sender Address: %s": 77,
"Step Complete: %s\n": 72,
"The '%s' layer stores common configuration. All curio instances can include it in their %s argument.": 37,
"The Curio team wants to improve the software you use. Tell the team you're using `%s`.": 23,
"The sectors are in the database. The database is ready for %s.": 46,
"This interactive tool creates a new miner actor and creates the basic configuration layer for it.": 1,
"This interactive tool migrates lotus-miner to Curio in 5 minutes.": 3,
"This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster < miner ID >' to finish the configuration.": 2,
"To run Curio: With machine or cgroup isolation, use the command (with example layer selection):": 124,
"To start, ensure your sealing pipeline is drained and shut-down lotus-miner.": 63,
"To work with the config: ": 123,
"Try the web interface with %s for further guided improvements.": 11,
"Unmigratable sectors found. Do you want to continue?": 18,
"Use the arrow keys to navigate: ↓ ↑ → ← ": 0,
"Username: %s": 109,
"Waiting for %s to write sectors into Yugabyte.": 43,
"Worker Address: %s": 76,
"Yes, continue": 19,
"You can add other layers for per-machine configuration changes.": 38,
"You can now migrate your market node (%s), if applicable.": 12,
"could not get API info for FullNode: %w": 16,
}
var enIndex = []uint32{ // 126 elements
// Entry 0 - 1F
0x00000000, 0x00000035, 0x00000097, 0x0000015a,
0x0000019c, 0x000001f5, 0x00000210, 0x0000021b,
0x0000023d, 0x00000250, 0x0000026a, 0x0000028a,
0x000002cc, 0x00000309, 0x0000032c, 0x00000372,
0x0000038b, 0x000003b6, 0x000003d1, 0x00000406,
0x00000414, 0x0000041e, 0x00000432, 0x0000046a,
0x000004c4, 0x000004f7, 0x00000541, 0x00000582,
0x000005b7, 0x000005c0, 0x000005e1, 0x00000602,
// Entry 20 - 3F
0x00000622, 0x0000063f, 0x0000065c, 0x0000068f,
0x0000069d, 0x000006b1, 0x0000071c, 0x0000075c,
0x00000785, 0x000007fc, 0x0000084d, 0x00000899,
0x000008cb, 0x000008f9, 0x00000918, 0x0000095a,
0x000009a0, 0x000009b9, 0x000009e9, 0x00000a13,
0x00000a3d, 0x00000a5f, 0x00000ad6, 0x00000af2,
0x00000b1f, 0x00000b41, 0x00000b62, 0x00000b83,
0x00000ba4, 0x00000bc5, 0x00000bdf, 0x00000bf5,
// Entry 40 - 5F
0x00000c42, 0x00000c7c, 0x00000c82, 0x00000cbe,
0x00000cea, 0x00000d33, 0x00000d73, 0x00000dc4,
0x00000dd6, 0x00000df0, 0x00000e10, 0x00000e35,
0x00000e4a, 0x00000e60, 0x00000e76, 0x00000e89,
0x00000ea2, 0x00000ee1, 0x00000f0b, 0x00000f23,
0x00000f37, 0x00000f5a, 0x00000f6e, 0x00000f84,
0x00000f96, 0x00000fb9, 0x00000fcb, 0x00000fed,
0x00001015, 0x00001036, 0x00001051, 0x0000107a,
// Entry 60 - 7F
0x0000109c, 0x000010ce, 0x00001165, 0x00001190,
0x000011c8, 0x000011f1, 0x00001229, 0x0000126a,
0x0000129a, 0x000012bd, 0x000012e5, 0x00001347,
0x00001353, 0x0000135f, 0x0000136f, 0x0000137f,
0x0000138f, 0x000013b6, 0x000013f7, 0x0000141b,
0x0000142c, 0x0000144e, 0x0000147b, 0x000014a1,
0x00001500, 0x0000159d, 0x000015eb, 0x00001605,
0x00001623, 0x00001683,
} // Size: 528 bytes
const enData string = "" + // Size: 5763 bytes
"\x04\x00\x01 0\x02Use the arrow keys to navigate: ↓ ↑ → ←\x02This intera" +
"ctive tool creates a new miner actor and creates the basic configuration" +
" layer for it.\x02This process is partially idempotent. Once a new miner" +
" actor has been created and subsequent steps fail, the user need to run " +
"'curio config new-cluster < miner ID >' to finish the configuration.\x02" +
"This interactive tool migrates lotus-miner to Curio in 5 minutes.\x02Eac" +
"h step needs your confirmation and can be reversed. Press Ctrl+C to exit" +
" at any time.\x02Ctrl+C pressed in Terminal\x02I want to:\x02Migrate fro" +
"m existing Lotus-Miner\x02Create a new miner\x02Aborting remaining steps" +
".\x02Lotus-Miner to Curio Migration.\x02Try the web interface with %[1]s" +
" for further guided improvements.\x02You can now migrate your market nod" +
"e (%[1]s), if applicable.\x02New Miner initialization complete.\x02Migra" +
"ting lotus-miner config.toml to Curio in-database configuration.\x02Erro" +
"r getting API: %[1]s\x02could not get API info for FullNode: %[1]w\x02Er" +
"ror getting token: %[1]s\x02Unmigratable sectors found. Do you want to c" +
"ontinue?\x02Yes, continue\x02No, abort\x02Aborting migration.\x02Error s" +
"aving config to layer: %[1]s. Aborting Migration\x02The Curio team wants" +
" to improve the software you use. Tell the team you're using `%[1]s`." +
"\x02Select what you want to share with the Curio team.\x02Individual Dat" +
"a: Miner ID, Curio version, chain (%[1]s or %[2]s). Signed.\x02Aggregate" +
"-Anonymous: version, chain, and Miner power (bucketed).\x02Hint: I am so" +
"meone running Curio on whichever chain.\x02Nothing.\x02Error getting min" +
"er power: %[1]s\x02Error marshalling message: %[1]s\x02Error getting min" +
"er info: %[1]s\x02Error signing message: %[1]s\x02Error sending message:" +
" %[1]s\x04\x00\x01 .\x02Error sending message: Status %[1]s, Message:" +
"\x02Message sent.\x04\x00\x01 \x0f\x02Documentation:\x02The '%[1]s' laye" +
"r stores common configuration. All curio instances can include it in the" +
"ir %[2]s argument.\x02You can add other layers for per-machine configura" +
"tion changes.\x02Filecoin %[1]s channels: %[2]s and %[3]s\x02Increase re" +
"liability using redundancy: start multiple machines with at-least the po" +
"st layer: 'curio run --layers=post'\x02One database can serve multiple m" +
"iner IDs: Run a migration for each lotus-miner.\x02Please start (or rest" +
"art) %[1]s now that database credentials are in %[2]s.\x02Waiting for %[" +
"1]s to write sectors into Yugabyte.\x02Error interpreting miner ID: %[1]" +
"s: ID: %[2]s\x02Error verifying sectors: %[1]s\x02The sectors are in the" +
" database. The database is ready for %[1]s.\x02Now shut down lotus-miner" +
" and lotus-worker and use run %[1]s instead.\x02Press return to continue" +
"\x02Sectors verified. %[1]d sector locations found.\x02Connected to Yuga" +
"byte. Schema is current.\x02Enabling Sector Indexing in the database." +
"\x02Error encoding config.toml: %[1]s\x02Press return to update %[1]s wi" +
"th Yugabyte info. A Backup file will be written to that folder before ch" +
"anges are made.\x02Error expanding path: %[1]s\x02Error reading filemode" +
" of config.toml: %[1]s\x02Error creating backup file: %[1]s\x02Error rea" +
"ding config.toml: %[1]s\x02Error writing backup file: %[1]s\x02Error clo" +
"sing backup file: %[1]s\x02Error writing config.toml: %[1]s\x04\x00\x01 " +
"\x15\x02Restart Lotus Miner.\x02Connected to Yugabyte\x02To start, ensur" +
"e your sealing pipeline is drained and shut-down lotus-miner.\x02Select " +
"the location of your lotus-miner config directory?\x02Other\x02Enter the" +
" path to the configuration directory used by %[1]s\x04\x00\x01 '\x02No p" +
"ath provided, abandoning migration\x02Cannot read the config.toml file i" +
"n the provided directory, Error: %[1]s\x02Could not create repo from dir" +
"ectory: %[1]s. Aborting migration\x02Could not lock miner repo. Your min" +
"er must be stopped: %[1]s\x0a Aborting migration\x02Read Miner Config" +
"\x04\x00\x01\x0a\x15\x02Step Complete: %[1]s\x02Initializing a new miner" +
" actor.\x02Enter the info to create a new miner\x02Owner Address: %[1]s" +
"\x02Worker Address: %[1]s\x02Sender Address: %[1]s\x02Sector Size: %[1]d" +
"\x02Confidence epochs: %[1]d\x02Continue to verify the addresses and cre" +
"ate a new miner actor.\x04\x00\x01 %\x02Miner creation error occurred: %" +
"[1]s\x02Enter the owner address\x02No address provided\x02Failed to pars" +
"e the address: %[1]s\x02Enter %[1]s address\x02Enter the sector size\x02" +
"No value provided\x02Failed to parse sector size: %[1]s\x02Confidence ep" +
"ochs\x02Failed to parse confidence: %[1]s\x02Failed to create the miner " +
"actor: %[1]s\x02Miner %[1]s created successfully\x02Cannot reach the DB:" +
" %[1]s\x02Error connecting to full node API: %[1]s\x02Pre-initialization" +
" steps complete\x02Failed to generate random bytes for secret: %[1]s\x02" +
"Please do not run guided-setup again as miner creation is not idempotent" +
". You need to run 'curio config new-cluster %[1]s' to finish the configu" +
"ration\x02Failed to get API info for FullNode: %[1]w\x02Failed to verify" +
" the auth token from daemon node: %[1]s\x02Failed to generate default co" +
"nfig: %[1]s\x02Failed to insert 'base' config layer in database: %[1]s" +
"\x02Configuration 'base' was updated to include this miner's address\x02" +
"Failed to load base config from database: %[1]s\x02Failed to parse base " +
"config: %[1]s\x02Failed to regenerate base config: %[1]s\x02Enter the in" +
"fo to connect to your Yugabyte database installation (https://download.y" +
"ugabyte.com/)\x02Host: %[1]s\x02Port: %[1]s\x02Username: %[1]s\x02Passwo" +
"rd: %[1]s\x02Database: %[1]s\x02Continue to connect and update schema." +
"\x04\x00\x01 <\x02Database config error occurred, abandoning migration: " +
"%[1]s\x02Enter the Yugabyte database host(s)\x02No host provided\x02Ente" +
"r the Yugabyte database %[1]s\x02Error connecting to Yugabyte database: " +
"%[1]s\x02Migrating metadata for %[1]d sectors.\x02Configuration 'base' w" +
"as updated to include this miner's address (%[1]s) and its wallet setup." +
"\x02Compare the configurations %[1]s to %[2]s. Changes between the miner" +
" IDs other than wallet addreses should be a new, minimal layer for runne" +
"rs that need it.\x02Configuration 'base' was created to resemble this lo" +
"tus-miner's config.toml .\x04\x00\x01 \x15\x02Layer %[1]s created.\x04" +
"\x00\x01 \x19\x02To work with the config:\x02To run Curio: With machine " +
"or cgroup isolation, use the command (with example layer selection):"
var koIndex = []uint32{ // 126 elements
// Entry 0 - 1F
0x00000000, 0x00000044, 0x000000c1, 0x000001c1,
0x0000020e, 0x00000289, 0x000002aa, 0x000002bc,
0x000002e5, 0x00000300, 0x00000325, 0x00000348,
0x000003b2, 0x00000402, 0x00000428, 0x00000481,
0x000004a0, 0x000004dc, 0x0000050c, 0x0000055c,
0x00000568, 0x0000057a, 0x00000595, 0x000005ed,
0x00000679, 0x000006b2, 0x00000708, 0x00000746,
0x00000794, 0x000007af, 0x000007e9, 0x0000081c,
// Entry 20 - 3F
0x00000856, 0x00000880, 0x000008aa, 0x000008ec,
0x00000910, 0x0000091d, 0x000009a3, 0x000009f5,
0x00000a1c, 0x00000ab8, 0x00000b4a, 0x00000bc5,
0x00000c09, 0x00000c47, 0x00000c6e, 0x00000cd9,
0x00000d26, 0x00000d4d, 0x00000d9c, 0x00000ddd,
0x00000e1d, 0x00000e64, 0x00000f0a, 0x00000f3a,
0x00000f89, 0x00000fac, 0x00000fcd, 0x00000ff0,
0x00001013, 0x00001051, 0x00001075, 0x0000108b,
// Entry 40 - 5F
0x000010f6, 0x00001145, 0x0000114c, 0x00001194,
0x000011e6, 0x00001240, 0x000012aa, 0x0000133b,
0x00001353, 0x0000136d, 0x00001391, 0x000013c4,
0x000013dc, 0x000013f4, 0x0000140c, 0x00001421,
0x00001439, 0x00001490, 0x000014bb, 0x000014d3,
0x000014fa, 0x0000151d, 0x00001531, 0x00001546,
0x0000156a, 0x00001594, 0x000015a5, 0x000015cb,
0x000015f1, 0x0000162a, 0x00001662, 0x0000169a,
// Entry 60 - 7F
0x000016b9, 0x00001705, 0x000017c3, 0x0000180f,
0x0000185d, 0x00001880, 0x000018dc, 0x0000192c,
0x00001981, 0x000019c4, 0x00001a03, 0x00001a71,
0x00001a82, 0x00001a90, 0x00001aa8, 0x00001abc,
0x00001ad6, 0x00001b00, 0x00001b63, 0x00001b9f,
0x00001bc9, 0x00001c01, 0x00001c55, 0x00001c8d,
0x00001d06, 0x00001dc0, 0x00001e17, 0x00001e46,
0x00001e6d, 0x00001ef9,
} // Size: 528 bytes
const koData string = "" + // Size: 7929 bytes
"\x04\x00\x01 ?\x02화살표 키를 사용하여 이동하세요: ↓ ↑ → ←\x02이 대화형 도구는 새로운 채굴자 액터를 생성" +
"하고 그에 대한 기본 구성 레이어를 생성합니다.\x02이 프로세스는 부분적으로 항등원적입니다. 새로운 채굴자 액터가 생성되었고" +
" 후속 단계가 실패하는 경우 사용자는 구성을 완료하기 위해 'curio config new-cluster < 채굴자 ID >'를 " +
"실행해야 합니다.\x02이 대화형 도구는 5분 안에 lotus-miner를 Curio로 이주합니다.\x02각 단계는 확인이 필" +
"요하며 되돌릴 수 있습니다. 언제든지 Ctrl+C를 눌러 종료할 수 있습니다.\x02터미널에서 Ctrl+C가 눌림\x02나는 " +
"원한다:\x02기존의 Lotus-Miner에서 이전하기\x02새로운 채굴자 생성\x02나머지 단계를 중단합니다.\x02Lotu" +
"s-Miner에서 Curio로 이주.\x02%[1]s를 사용하여 웹 인터페이스를 시도하고 더 나은 안내된 개선을 진행하세요." +
"\x02해당하는 경우 이제 시장 노드를 이주할 수 있습니다 (%[1]s).\x02새로운 채굴자 초기화 완료.\x02lotus-mi" +
"ner config.toml을 Curio의 데이터베이스 구성으로 이전 중입니다.\x02API 가져오기 오류: %[1]s\x02Fu" +
"llNode의 API 정보를 가져올 수 없습니다: %[1]w\x02토큰을 가져오는 중 오류 발생: %[1]s\x02이동할 수 없는" +
" 섹터가 발견되었습니다. 계속하시겠습니까?\x02예, 계속\x02아니오, 중단\x02마이그레이션 중단.\x02레이어에 구성을 저장" +
"하는 중 오류 발생: %[1]s. 마이그레이션 중단\x02Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에" +
"게 `%[1]s`를 사용 중이라고 알려주세요.\x02Curio 팀과 공유하고 싶은 것을 선택하세요.\x02개별 데이터: 채굴자" +
" ID, Curio 버전, 체인 (%[1]s 또는 %[2]s). 서명됨.\x02집계-익명: 버전, 체인, 및 채굴자 파워 (버킷)" +
".\x02힌트: 나는 어떤 체인에서든 Curio를 실행 중인 사람입니다.\x02아무것도 없습니다.\x02마이너 파워를 가져오는 중" +
" 오류 발생: %[1]s\x02메시지를 마샬하는 중 오류 발생: %[1]s\x02마이너 정보를 가져오는 중 오류 발생: %[1]s" +
"\x02메시지 서명 중 오류 발생: %[1]s\x02메시지 전송 중 오류 발생: %[1]s\x04\x00\x01 =\x02메시지 " +
"전송 중 오류 발생: 상태 %[1]s, 메시지:\x02메시지가 전송되었습니다.\x04\x00\x01 \x08\x02문서:" +
"\x02'%[1]s' 레이어에는 공통 구성이 저장됩니다. 모든 Curio 인스턴스는 %[2]s 인수에 포함시킬 수 있습니다." +
"\x02기계별 구성 변경을 위해 다른 레이어를 추가할 수 있습니다.\x02Filecoin %[1]s 채널: %[2]s 및 %[3]" +
"s\x02신뢰성 향상을 위한 중복성 사용: 적어도 post 레이어를 사용하여 여러 대의 기계를 시작하십시오: 'curio run " +
"--layers=post'\x02한 개의 데이터베이스는 여러 광부 ID를 제공할 수 있습니다: 각 lotus-miner에 대해 마" +
"이그레이션을 실행하세요.\x02데이터베이스 자격 증명이 %[2]s에 입력되었으므로 지금 %[1]s을 시작하거나 다시 시작하세요" +
".\x02%[1]s가 Yugabyte에 섹터를 기록하도록 대기 중입니다.\x02광부 ID를 해석하는 중 오류 발생: %[1]s: " +
"ID: %[2]s\x02섹터 확인 중 오류 발생: %[1]s\x02섹터가 데이터베이스에 있습니다. 데이터베이스가 %[1]s를 위해" +
" 준비되었습니다.\x02이제 lotus-miner와 lotus-worker를 종료하고 %[1]s을 실행하세요.\x02계속하려면 리" +
"턴을 누르세요\x02섹터가 확인되었습니다. %[1]d개의 섹터 위치를 찾았습니다.\x02Yugabyte에 연결되었습니다. 스키" +
"마가 현재입니다.\x02데이터베이스에서 Sector Indexing을 활성화합니다.\x02config.toml을 인코딩하는 중" +
" 오류가 발생했습니다: %[1]s\x02%[1]s을 Yugabyte 정보로 업데이트하려면 리턴 키를 누르세요. 변경 사항을 적용하" +
"기 전에 해당 폴더에 백업 파일이 작성됩니다.\x02경로를 확장하는 중 오류 발생: %[1]s\x02config.toml의 파" +
"일 모드를 읽는 중 오류가 발생했습니다: %[1]s\x02백업 파일 생성 오류: %[1]s\x02config.toml 읽기 오" +
"류: %[1]s\x02백업 파일 쓰기 오류: %[1]s\x02백업 파일 닫기 오류: %[1]s\x02config.toml을 쓰" +
"는 중 오류가 발생했습니다: %[1]s\x04\x00\x01 \x1f\x02로터스 마이너 재시작.\x02Yugabyte에 연결" +
"됨\x02시작하려면 밀봉 파이프라인이 비어 있고 lotus-miner가 종료되었는지 확인하세요.\x02로터스 마이너 구성 디렉" +
"토리의 위치를 선택하시겠습니까?\x02기타\x02%[1]s에서 사용하는 구성 디렉터리 경로를 입력하세요.\x04\x00\x01" +
" M\x02경로가 제공되지 않았으므로 마이그레이션을 포기합니다\x02제공된 디렉토리에서 config.toml 파일을 읽을 수 없습" +
"니다. 오류: %[1]s\x02디렉토리에서 저장소를 생성할 수 없습니다: %[1]s. 마이그레이션을 중단합니다.\x02광부 저" +
"장소를 잠금 해제할 수 없습니다. 귀하의 광부를 중지해야 합니다: %[1]s\x0a 마이그레이션을 중단합니다.\x02마이너 구" +
"성 읽기\x04\x00\x01\x0a\x15\x02단계 완료: %[1]s\x02새 채굴자 액터 초기화 중.\x02새 채굴자를 " +
"생성하기 위한 정보 입력\x02소유자 주소: %[1]s\x02작업자 주소: %[1]s\x02송신자 주소: %[1]s\x02섹터" +
" 크기: %[1]d\x02신뢰 에포크: %[1]d\x02주소를 확인하고 새 채굴자 액터를 생성하려면 계속 진행하세요.\x04" +
"\x00\x01 &\x02채굴자 생성 오류 발생: %[1]s\x02소유자 주소 입력\x02주소가 제공되지 않았습니다\x02주소 구" +
"문 분석 실패: %[1]s\x02%[1]s 주소 입력\x02섹터 크기 입력\x02값이 제공되지 않았습니다\x02섹터 크기 구문" +
" 분석 실패: %[1]s\x02신뢰 에포크\x02신뢰도 구문 분석 실패: %[1]s\x02채굴자 액터 생성 실패: %[1]s" +
"\x02%[1]s 채굴자가 성공적으로 생성되었습니다\x02데이터베이스에 연결할 수 없습니다: %[1]s\x02풀 노드 API에 연" +
"결하는 중 오류 발생: %[1]s\x02사전 초기화 단계 완료\x02비밀번호를 위한 랜덤 바이트 생성에 실패했습니다: %[1]" +
"s\x02마이너 생성은 idempotent하지 않으므로 가이드 설정을 다시 실행하지 마십시오. 구성을 완료하려면 'curio co" +
"nfig new-cluster %[1]s'를 실행해야 합니다.\x02FullNode에 대한 API 정보를 가져오는 데 실패했습니다" +
": %[1]w\x02데몬 노드로부터 인증 토큰을 확인하는 중 오류 발생: %[1]s\x02기본 구성 생성 실패: %[1]s\x02" +
"데이터베이스에 'base' 구성 레이어를 삽입하는 데 실패했습니다: %[1]s\x02이 마이너 주소를 포함한 구성 'base'" +
"가 업데이트되었습니다.\x02데이터베이스에서 기본 구성을 로드하는 데 실패했습니다: %[1]s\x02기본 구성을 구문 분석하는" +
" 데 실패했습니다: %[1]s\x02기본 구성을 재생성하는 데 실패했습니다: %[1]s\x02Yugabyte 데이터베이스 설치에 " +
"연결할 정보를 입력하십시오 (https://download.yugabyte.com/)\x02호스트: %[1]s\x02포트: %" +
"[1]s\x02사용자 이름: %[1]s\x02비밀번호: %[1]s\x02데이터베이스: %[1]s\x02계속 연결 및 스키마 업데이" +
"트.\x04\x00\x01 ^\x02데이터베이스 구성 오류가 발생하여 마이그레이션을 포기합니다: %[1]s\x02Yugabyt" +
"e 데이터베이스 호스트를 입력하십시오\x02호스트가 제공되지 않았습니다\x02Yugabyte 데이터베이스 %[1]s을 입력하십시오" +
"\x02Yugabyte 데이터베이스에 연결하는 중 오류가 발생했습니다: %[1]s\x02%[1]d 섹터의 메타데이터를 이동 중입니" +
"다.\x02기본 설정 'base'가 이 마이너의 주소(%[1]s) 및 지갑 설정을 포함하도록 업데이트되었습니다.\x02구성 %" +
"[1]s를 %[2]s과 비교하세요. 지갑 주소 이외의 마이너 ID 사이의 변경 사항은 필요한 실행자를 위한 새로운 최소한의 레이어" +
"여야 합니다.\x02'base' 설정이 이 lotus-miner의 config.toml과 유사하게 만들어졌습니다.\x04" +
"\x00\x01 *\x02레이어 %[1]s가 생성되었습니다.\x04\x00\x01 \x22\x02구성 파일을 사용하려면:\x02C" +
"urio를 실행하려면: 기계 또는 cgroup 격리를 사용하여 다음 명령을 사용하세요 (예제 레이어 선택과 함께):"
var zhIndex = []uint32{ // 126 elements
// Entry 0 - 1F
0x00000000, 0x00000033, 0x0000008b, 0x00000134,
0x0000017c, 0x000001cb, 0x000001e4, 0x000001f1,
0x00000211, 0x0000022a, 0x00000240, 0x0000025d,
0x000002a5, 0x000002e6, 0x00000302, 0x00000347,
0x00000364, 0x0000038d, 0x000003ab, 0x000003df,
0x000003ef, 0x000003fc, 0x0000040c, 0x00000445,
0x00000499, 0x000004c6, 0x00000515, 0x00000550,
0x00000585, 0x0000058f, 0x000005b3, 0x000005d1,
// Entry 20 - 3F
0x000005f5, 0x00000613, 0x00000631, 0x00000666,
0x00000679, 0x00000688, 0x000006e2, 0x0000071f,
0x00000747, 0x000007a6, 0x000007f6, 0x00000849,
0x0000086f, 0x0000089c, 0x000008ba, 0x000008f6,
0x0000093a, 0x0000094a, 0x0000097d, 0x000009aa,
0x000009cf, 0x000009f2, 0x00000a6a, 0x00000a88,
0x00000ab7, 0x00000adb, 0x00000b00, 0x00000b24,
0x00000b48, 0x00000b6b, 0x00000b8b, 0x00000ba0,
// Entry 40 - 5F
0x00000beb, 0x00000c1b, 0x00000c22, 0x00000c4c,
0x00000c70, 0x00000cb4, 0x00000ce6, 0x00000d2f,
0x00000d42, 0x00000d5c, 0x00000d7b, 0x00000da0,
0x00000db8, 0x00000dcd, 0x00000de5, 0x00000df9,
0x00000e10, 0x00000e41, 0x00000e66, 0x00000e7c,
0x00000e8c, 0x00000ea6, 0x00000eba, 0x00000ecd,
0x00000eda, 0x00000efa, 0x00000f0a, 0x00000f27,
0x00000f47, 0x00000f61, 0x00000f7e, 0x00000faf,
// Entry 60 - 7F
0x00000fc8, 0x00000ff1, 0x0000107e, 0x000010aa,
0x000010e5, 0x00001105, 0x00001136, 0x00001169,
0x00001196, 0x000011b7, 0x000011dd, 0x00001237,
0x00001246, 0x00001255, 0x00001267, 0x00001276,
0x00001288, 0x000012a7, 0x000012df, 0x00001304,
0x00001314, 0x00001332, 0x0000135e, 0x00001388,
0x000013d9, 0x0000145b, 0x000014a2, 0x000014bc,
0x000014d4, 0x0000152b,
} // Size: 528 bytes
const zhData string = "" + // Size: 5419 bytes
"\x04\x00\x01 .\x02使用箭头键进行导航↓ ↑ → ←\x02此交互式工具将创建一个新的矿工角色并为其创建基本配置层。\x02" +
"该过程部分幂等。一旦创建了新的矿工角色,并且随后的步骤失败,用户需要运行 'curio config new-cluster < 矿工 ID" +
" >' 来完成配置。\x02这个交互式工具可以在5分钟内将lotus-miner迁移到Curio。\x02每一步都需要您的确认并且可以撤销。随" +
"时按Ctrl+C退出。\x02在终端中按下Ctrl+C\x02我想要\x02从现有的 Lotus-Miner 迁移\x02创建一个新的矿工" +
"\x02中止剩余步骤。\x02Lotus-Miner到Curio迁移。\x02尝试使用%[1]s的网络界面进行更进一步的指导性改进。\x02如果" +
"适用,您现在可以迁移您的市场节点(%[1]s)。\x02新矿工初始化完成。\x02将 lotus-miner config.toml 迁移到" +
" Curio 的数据库配置中。\x02获取 API 时出错:%[1]s\x02无法获取FullNode的API信息%[1]w\x02获取令牌时" +
"出错:%[1]s\x02发现无法迁移的扇区。您想要继续吗\x02是的继续\x02不中止\x02中止迁移。\x02保存配置到层时出错%" +
"[1]s。正在中止迁移\x02Curio 团队希望改进您使用的软件。告诉团队您正在使用 `%[1]s`。\x02选择您想与Curio团队分享的内" +
"容。\x02个人数据矿工 IDCurio 版本,链(%[1]s 或 %[2]s。签名。\x02聚合-匿名:版本,链和矿工算力(分桶)。" +
"\x02提示我是在任何链上运行 Curio 的人。\x02没有。\x02获取矿工功率时出错%[1]s\x02整理消息时出错%[1]s" +
"\x02获取矿工信息时出错%[1]s\x02签署消息时出错%[1]s\x02发送消息时出错%[1]s\x04\x00\x01 0\x02发" +
"送消息时出错:状态%[1]s消息\x02消息已发送。\x04\x00\x01 \x0a\x02文档\x02'%[1]s'层存储通用配置" +
"。所有Curio实例都可以在其%[2]s参数中包含它。\x02您可以添加其他层进行每台机器的配置更改。\x02Filecoin %[1]s " +
"频道:%[2]s 和 %[3]s\x02通过冗余增加可靠性使用至少后层启动多台机器'curio run --layers=post'" +
"\x02一个数据库可以服务多个矿工ID为每个lotus-miner运行迁移。\x02请立即启动或重新启动%[1]s因为数据库凭据已在%[" +
"2]s中。\x02等待%[1]s将扇区写入Yugabyte。\x02解释矿工ID时出错%[1]sID%[2]s\x02验证扇区时出错%[" +
"1]s\x02扇区在数据库中。数据库已准备好用于%[1]s。\x02现在关闭lotus-miner和lotus-worker改为使用%[1]s" +
"运行。\x02按回车继续\x02扇区已验证。发现了%[1]d个扇区位置。\x02已连接到Yugabyte。模式是当前的。\x02在数据库中启" +
"用扇区索引。\x02编码config.toml时出错%[1]s\x02按回车键更新 %[1]s 以包含 Yugabyte 信息。在进行更改" +
"之前,将在该文件夹中写入备份文件。\x02扩展路径时出错%[1]s\x02读取config.toml文件模式时出错%[1]s\x02创建" +
"备份文件时出错:%[1]s\x02读取 config.toml 时出错:%[1]s\x02写入备份文件时出错%[1]s\x02关闭备份文件" +
"时出错:%[1]s\x02写入config.toml时出错%[1]s\x04\x00\x01 \x1b\x02重新启动Lotus Mine" +
"r。\x02已连接到Yugabyte\x02开始之前请确保您的密封管道已排空并关闭lotus-miner。\x02选择您的lotus-mine" +
"r配置目录的位置\x02其他\x02输入%[1]s使用的配置目录的路径\x04\x00\x01 \x1f\x02未提供路径放弃迁移\x02无" +
"法读取提供的目录中的config.toml文件错误%[1]s\x02无法从目录创建repo%[1]s。 中止迁移\x02无法锁定矿工r" +
"epo。 您的矿工必须停止:%[1]s\x0a 中止迁移\x02读取矿工配置\x04\x00\x01\x0a\x15\x02步骤完成%[1]s" +
"\x02初始化新的矿工角色。\x02输入创建新矿工所需的信息\x02所有者地址%[1]s\x02工作地址%[1]s\x02发送者地址%[1" +
"]s\x02扇区大小: %[1]d\x02置信度时期: %[1]d\x02继续验证地址并创建新的矿工角色。\x04\x00\x01 \x02矿" +
"工创建错误发生: %[1]s\x02输入所有者地址\x02未提供地址\x02解析地址失败: %[1]s\x02输入 %[1]s 地址\x02" +
"输入扇区大小\x02未提供值\x02解析扇区大小失败: %[1]s\x02置信度时期\x02解析置信度失败: %[1]s\x02创建矿工角色" +
"失败: %[1]s\x02矿工 %[1]s 创建成功\x02无法访问数据库: %[1]s\x02连接到完整节点 API 时发生错误: %[1" +
"]s\x02预初始化步骤完成\x02生成密码的随机字节失败: %[1]s\x02请不要再次运行引导设置因为矿工创建不是幂等的。 您需要运行 '" +
"curio config new-cluster %[1]s' 来完成配置。\x02无法获取 FullNode 的 API 信息: %[1]w" +
"\x02无法验证来自守护进程节点的授权令牌: %[1]s\x02无法生成默认配置: %[1]s\x02无法将 'base' 配置层插入数据库: " +
"%[1]s\x02配置 'base' 已更新以包含此矿工的地址\x02从数据库加载基本配置失败%[1]s\x02解析基本配置失败%[1]s" +
"\x02重新生成基本配置失败: %[1]s\x02输入连接到您的Yugabyte数据库安装的信息https://download.yugaby" +
"te.com/\x02主机%[1]s\x02端口%[1]s\x02用户名%[1]s\x02密码%[1]s\x02数据库%[1]s" +
"\x02继续连接和更新架构。\x04\x00\x01 3\x02发生数据库配置错误放弃迁移%[1]s\x02输入Yugabyte数据库主机" +
"S\x02未提供主机\x02输入Yugabyte数据库 %[1]s\x02连接到Yugabyte数据库时出错%[1]s\x02正在迁移%[1" +
"]d个扇区的元数据。\x02'base'配置已更新,包括该矿工的地址(%[1]s及其钱包设置。\x02比较配置%[1]s和%[2]s。矿工ID" +
"之间除了钱包地址的变化应该是需要的运行者的一个新的、最小的层。\x02'base'配置已创建以类似于这个lotus-miner的confi" +
"g.toml。\x04\x00\x01 \x15\x02层%[1]s已创建。\x04\x00\x01 \x13\x02要使用配置\x02运行C" +
"urio使用机器或cgroup隔离使用命令附带示例层选择"
// Total table size 20695 bytes (20KiB); checksum: BB5CCE20
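For reference, this generated catalog is consumed through the golang.org/x/text message API: the init() above installs it as message.DefaultCatalog, after which a Printer for a supported language resolves the English keys. A minimal sketch (the import path is assumed for illustration):
package main
import (
	"golang.org/x/text/language"
	"golang.org/x/text/message"
	// Blank-importing the translations package runs its init(), installing the catalog.
	// Import path assumed for illustration.
	_ "github.com/filecoin-project/lotus/cmd/curio/internal/translations"
)
func main() {
	p := message.NewPrinter(language.Korean)
	// Prints the Korean translation of this key, falling back to English if missing.
	p.Printf("Create a new miner")
}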

View File

@ -1,82 +0,0 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"path"
"github.com/samber/lo"
)
func main() {
for _, arg := range os.Args {
handleKnowns(arg)
}
}
func handleKnowns(pathStart string) {
outpath := path.Join(pathStart, "out.gotext.json")
b, err := os.ReadFile(outpath)
if err != nil {
fmt.Println("cannot open "+outpath+":", err)
return
}
type TMsg struct {
ID string `json:"id"`
Translation string `json:"translation"`
Message string `json:"message"`
Placeholder json.RawMessage `json:"placeholder"`
}
type Dataformat struct {
Language string `json:"language"`
Messages []TMsg `json:"messages"`
}
var outData Dataformat
err = json.NewDecoder(bytes.NewBuffer(b)).Decode(&outData)
if err != nil {
fmt.Println("cannot decode "+outpath+":", err)
return
}
f, err := os.Open(path.Join(pathStart, "messages.gotext.json"))
if err != nil {
fmt.Println("cannot open "+path.Join(pathStart, "messages.gotext.json")+":", err)
return
}
defer func() { _ = f.Close() }()
var msgData Dataformat
err = json.NewDecoder(f).Decode(&msgData)
if err != nil {
fmt.Println("cannot decode "+path.Join(pathStart, "messages.gotext.json")+":", err)
return
}
knowns := map[string]string{}
for _, msg := range msgData.Messages {
knowns[msg.ID] = msg.Translation
}
toTranslate := lo.Filter(outData.Messages, func(msg TMsg, _ int) bool {
_, ok := knowns[msg.ID]
return !ok
})
outData.Messages = toTranslate // drop the "done" messages
var outJSON bytes.Buffer
enc := json.NewEncoder(&outJSON)
enc.SetIndent(" ", " ")
err = enc.Encode(outData)
if err != nil {
fmt.Println("cannot encode "+outpath+":", err)
return
}
err = os.WriteFile(outpath, outJSON.Bytes(), 0644)
if err != nil {
fmt.Println("cannot write "+outpath+":", err)
return
}
fmt.Println("rearranged successfully")
}

View File

@ -1,4 +0,0 @@
{
"language": "ko",
"messages": []
}

View File

@ -1,4 +0,0 @@
{
"language": "zh",
"messages": []
}

View File

@ -1,27 +0,0 @@
// Usage:
// To UPDATE translations:
//
// 1. add/change strings in guidedsetup folder that use d.T() or d.say().
//
// 2. run `go generate` in the cmd/curio/internal/translations/ folder.
//
// 3. ChatGPT 3.5 can translate the ./locales/??/out.gotext.json files,
//    which ONLY include the un-translated messages.
//    APPEND the results to each messages.gotext.json file's array.
//
// ChatGPT fuss:
// - on a good day, you may need to hit "continue generating".
// - more than ~60 messages? you'll need to give it sections of the file.
//
// 4. Re-import with `go generate` again.
//
// To ADD a language:
// 1. Add it to the list in updateLang.sh
// 2. Run `go generate` in the cmd/curio/internal/translations/ folder.
// 3. Follow the "Update translations" steps here.
// 4. Code will auto-detect the new language and use it.
//
// FUTURE Reliability: OpenAPI automation.
package translations
//go:generate ./updateLang.sh

View File

@ -1,8 +0,0 @@
#!/bin/bash
#OP: Only run if some file in ../guidedsetup* is newer than catalog.go
# Change this condition if using translations more widely.
if [ "$(find ../../guidedsetup/* -newer catalog.go)" ] || [ "$(find locales/* -newer catalog.go)" ]; then
gotext -srclang=en update -out=catalog.go -lang=en,zh,ko github.com/filecoin-project/lotus/cmd/curio/guidedsetup
go run knowns/main.go locales/zh locales/ko
fi

View File

@ -1,105 +0,0 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/rpc"
)
var logCmd = &cli.Command{
Name: "log",
Usage: "Manage logging",
Subcommands: []*cli.Command{
LogList,
LogSetLevel,
},
}
var LogList = &cli.Command{
Name: "list",
Usage: "List log systems",
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
systems, err := minerApi.LogList(ctx)
if err != nil {
return err
}
for _, system := range systems {
fmt.Println(system)
}
return nil
},
}
var LogSetLevel = &cli.Command{
Name: "set-level",
Usage: "Set log level",
ArgsUsage: "[level]",
Description: `Set the log level for logging systems:
The system flag can be specified multiple times.
eg) log set-level --system chain --system chainxchg debug
Available Levels:
debug
info
warn
error
Environment Variables:
GOLOG_LOG_LEVEL - Default log level for all log systems
GOLOG_LOG_FMT - Change output log format (json, nocolor)
GOLOG_FILE - Write logs to file
GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr
`,
Flags: []cli.Flag{
&cli.StringSliceFlag{
Name: "system",
Usage: "limit to log system",
Value: &cli.StringSlice{},
},
},
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if !cctx.Args().Present() {
return fmt.Errorf("level is required")
}
systems := cctx.StringSlice("system")
if len(systems) == 0 {
var err error
systems, err = minerApi.LogList(ctx)
if err != nil {
return err
}
}
for _, system := range systems {
if err := minerApi.LogSetLevel(ctx, system, cctx.Args().First()); err != nil {
return xerrors.Errorf("setting log level on %s: %v", system, err)
}
}
return nil
},
}

View File

@ -1,190 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
"runtime/pprof"
"syscall"
"github.com/docker/go-units"
"github.com/fatih/color"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/lotus/build"
lcli "github.com/filecoin-project/lotus/cli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/guidedsetup"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/lib/lotuslog"
"github.com/filecoin-project/lotus/lib/tracing"
"github.com/filecoin-project/lotus/node/repo"
)
var log = logging.Logger("main")
const (
FlagMinerRepo = "miner-repo"
)
func setupCloseHandler() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
fmt.Println("\r- Ctrl+C pressed in Terminal")
_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
panic(1)
}()
}
func main() {
lotuslog.SetupLogLevels()
local := []*cli.Command{
cliCmd,
runCmd,
stopCmd,
configCmd,
testCmd,
webCmd,
guidedsetup.GuidedsetupCmd,
sealCmd,
marketCmd,
fetchParamCmd,
ffiCmd,
}
jaeger := tracing.SetupJaegerTracing("curio")
defer func() {
if jaeger != nil {
_ = jaeger.ForceFlush(context.Background())
}
}()
for _, cmd := range local {
cmd := cmd
originBefore := cmd.Before
cmd.Before = func(cctx *cli.Context) error {
if jaeger != nil {
_ = jaeger.Shutdown(cctx.Context)
}
jaeger = tracing.SetupJaegerTracing("curio/" + cmd.Name)
if cctx.IsSet("color") {
color.NoColor = !cctx.Bool("color")
}
if originBefore != nil {
return originBefore(cctx)
}
return nil
}
}
app := &cli.App{
Name: "curio",
Usage: "Filecoin decentralized storage network provider",
Version: build.UserVersion(),
EnableBashCompletion: true,
Before: func(c *cli.Context) error {
setupCloseHandler()
return nil
},
Flags: []cli.Flag{
&cli.BoolFlag{
// examined in the Before above
Name: "color",
Usage: "use color in display output",
DefaultText: "depends on output being a TTY",
},
&cli.StringFlag{
Name: "panic-reports",
EnvVars: []string{"CURIO_PANIC_REPORT_PATH"},
Hidden: true,
Value: "~/.curio", // should follow --repo default
},
&cli.StringFlag{
Name: "db-host",
EnvVars: []string{"CURIO_DB_HOST", "CURIO_HARMONYDB_HOSTS"},
Usage: "Comma-separated list of hostnames for the Yugabyte cluster",
Value: "127.0.0.1",
},
&cli.StringFlag{
Name: "db-name",
EnvVars: []string{"CURIO_DB_NAME", "CURIO_HARMONYDB_NAME"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-user",
EnvVars: []string{"CURIO_DB_USER", "CURIO_HARMONYDB_USERNAME"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-password",
EnvVars: []string{"CURIO_DB_PASSWORD", "CURIO_HARMONYDB_PASSWORD"},
Value: "yugabyte",
},
&cli.StringFlag{
Name: "db-port",
EnvVars: []string{"CURIO_DB_PORT", "CURIO_HARMONYDB_PORT"},
Value: "5433",
},
&cli.StringFlag{
Name: deps.FlagRepoPath,
EnvVars: []string{"CURIO_REPO_PATH"},
Value: "~/.curio",
},
cliutil.FlagVeryVerbose,
},
Commands: local,
After: func(c *cli.Context) error {
if r := recover(); r != nil {
p, err := homedir.Expand(c.String(FlagMinerRepo))
if err != nil {
log.Errorw("could not expand repo path for panic report", "error", err)
panic(r)
}
// Generate report in CURIO_PATH and re-raise panic
build.GeneratePanicReport(c.String("panic-reports"), p, c.App.Name)
panic(r)
}
return nil
},
}
app.Setup()
app.Metadata["repoType"] = repo.Curio
lcli.RunApp(app)
}
var fetchParamCmd = &cli.Command{
Name: "fetch-params",
Usage: "Fetch proving parameters",
ArgsUsage: "[sectorSize]",
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return xerrors.Errorf("incorrect number of arguments")
}
sectorSizeInt, err := units.RAMInBytes(cctx.Args().First())
if err != nil {
return xerrors.Errorf("error parsing sector size (specify as \"32GiB\", for instance): %w", err)
}
sectorSize := uint64(sectorSizeInt)
err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize)
if err != nil {
return xerrors.Errorf("fetching proof parameters: %w", err)
}
return nil
},
}
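// Usage sketch (size value illustrative): "curio fetch-params 32GiB" downloads the proof
// parameters and SRS needed for 32 GiB sectors.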

View File

@ -1,201 +0,0 @@
package main
import (
"fmt"
"sort"
"strconv"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/market/lmrpc"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
var marketCmd = &cli.Command{
Name: "market",
Subcommands: []*cli.Command{
marketRPCInfoCmd,
marketSealCmd,
},
}
var marketRPCInfoCmd = &cli.Command{
Flags: []cli.Flag{
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
},
},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
layers := cctx.StringSlice("layers")
cfg, err := deps.GetConfig(cctx.Context, layers, db)
if err != nil {
return xerrors.Errorf("get config: %w", err)
}
ts, err := lmrpc.MakeTokens(cfg)
if err != nil {
return xerrors.Errorf("make tokens: %w", err)
}
var addrTokens []struct {
Address string
Token string
}
for address, s := range ts {
addrTokens = append(addrTokens, struct {
Address string
Token string
}{
Address: address.String(),
Token: s,
})
}
sort.Slice(addrTokens, func(i, j int) bool {
return addrTokens[i].Address < addrTokens[j].Address
})
for _, at := range addrTokens {
fmt.Printf("[lotus-miner/boost compatible] %s %s\n", at.Address, at.Token)
}
return nil
},
Name: "rpc-info",
}
var marketSealCmd = &cli.Command{
Name: "seal",
Usage: "start sealing a deal sector early",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "actor",
Usage: "Specify actor address to start sealing sectors for",
Required: true,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
},
&cli.BoolFlag{
Name: "synthetic",
Usage: "Use synthetic PoRep",
Value: false, // todo implement synthetic
},
},
Action: func(cctx *cli.Context) error {
act, err := address.NewFromString(cctx.String("actor"))
if err != nil {
return xerrors.Errorf("parsing --actor: %w", err)
}
if cctx.Args().Len() > 1 {
return xerrors.Errorf("specify only one sector")
}
sec := cctx.Args().First()
sector, err := strconv.ParseUint(sec, 10, 64)
if err != nil {
return xerrors.Errorf("failed to parse the sector number: %w", err)
}
ctx := lcli.ReqContext(cctx)
dep, err := deps.GetDepsCLI(ctx, cctx)
if err != nil {
return err
}
mid, err := address.IDFromAddress(act)
if err != nil {
return xerrors.Errorf("getting miner id: %w", err)
}
mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
wpt := mi.WindowPoStProofType
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic"))
if err != nil {
return xerrors.Errorf("getting seal proof type: %w", err)
}
comm, err := dep.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
// Get current open sector pieces from DB
var pieces []struct {
Sector abi.SectorNumber `db:"sector_number"`
Size abi.PaddedPieceSize `db:"piece_size"`
Index uint64 `db:"piece_index"`
}
err = tx.Select(&pieces, `
SELECT
sector_number,
piece_size,
piece_index
FROM
open_sector_pieces
WHERE
sp_id = $1 AND sector_number = $2
ORDER BY
piece_index DESC;`, mid, sector)
if err != nil {
return false, xerrors.Errorf("getting open sectors from DB")
}
if len(pieces) < 1 {
return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector)
}
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, mid, sector, spt)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
if cn != 1 {
return false, xerrors.Errorf("incorrect number of rows returned")
}
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", mid, sector)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return xerrors.Errorf("start sealing sector: %w", err)
}
if !comm {
return xerrors.Errorf("start sealing sector: commit failed")
}
return nil
},
}
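// Usage sketch (actor address and sector number illustrative): "curio market seal --actor t01000 42"
// moves the open pieces of sector 42 into the SDR sealing pipeline for miner t01000.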

View File

@ -1,216 +0,0 @@
package main
import (
"fmt"
"github.com/ipfs/go-datastore"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/guidedsetup"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/seal"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/repo"
)
var sealCmd = &cli.Command{
Name: "seal",
Usage: "Manage the sealing pipeline",
Subcommands: []*cli.Command{
sealStartCmd,
sealMigrateLMSectorsCmd,
},
}
var sealStartCmd = &cli.Command{
Name: "start",
Usage: "Start new sealing operations manually",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "actor",
Usage: "Specify actor address to start sealing sectors for",
Required: true,
},
&cli.BoolFlag{
Name: "now",
Usage: "Start sealing sectors for all actors now (not on schedule)",
},
&cli.BoolFlag{
Name: "cc",
Usage: "Start sealing new CC sectors",
},
&cli.IntFlag{
Name: "count",
Usage: "Number of sectors to start",
Value: 1,
},
&cli.BoolFlag{
Name: "synthetic",
Usage: "Use synthetic PoRep",
Value: false, // todo implement synthetic
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Bool("now") {
return xerrors.Errorf("schedule not implemented, use --now")
}
if !cctx.IsSet("actor") {
return cli.ShowCommandHelp(cctx, "start")
}
if !cctx.Bool("cc") {
return xerrors.Errorf("only CC sectors supported for now")
}
act, err := address.NewFromString(cctx.String("actor"))
if err != nil {
return xerrors.Errorf("parsing --actor: %w", err)
}
ctx := lcli.ReqContext(cctx)
dep, err := deps.GetDepsCLI(ctx, cctx)
if err != nil {
return err
}
/*
create table sectors_sdr_pipeline (
sp_id bigint not null,
sector_number bigint not null,
-- at request time
create_time timestamp not null,
reg_seal_proof int not null,
comm_d_cid text not null,
[... other not relevant fields]
*/
mid, err := address.IDFromAddress(act)
if err != nil {
return xerrors.Errorf("getting miner id: %w", err)
}
mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
wpt := mi.WindowPoStProofType
spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic"))
if err != nil {
return xerrors.Errorf("getting seal proof type: %w", err)
}
num, err := seal.AllocateSectorNumbers(ctx, dep.Full, dep.DB, act, cctx.Int("count"), func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
for _, n := range numbers {
_, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt)
if err != nil {
return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
}
}
return true, nil
})
if err != nil {
return xerrors.Errorf("allocating sector numbers: %w", err)
}
for _, number := range num {
fmt.Println(number)
}
return nil
},
}
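// Usage sketch (values illustrative): "curio seal start --now --cc --count 2 --actor t01000"
// allocates two new CC sector numbers for t01000 and inserts them into sectors_sdr_pipeline.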
var sealMigrateLMSectorsCmd = &cli.Command{
Name: "migrate-lm-sectors",
Usage: "(debug tool) Copy LM sector metadata into Curio DB",
Hidden: true, // only needed in advanced cases where manual repair is needed
Flags: []cli.Flag{
&cli.StringFlag{
Name: "miner-repo",
Usage: "Path to miner repo",
Value: "~/.lotusminer",
},
&cli.BoolFlag{
Name: "seal-ignore",
Usage: "Ignore sectors that cannot be migrated",
Value: false,
EnvVars: []string{"CURUO_MIGRATE_SEAL_IGNORE"},
},
},
Action: func(cctx *cli.Context) error {
ctx := lcli.ReqContext(cctx)
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
r, err := repo.NewFS(cctx.String("miner-repo"))
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if !ok {
return fmt.Errorf("repo not initialized at: %s", cctx.String("miner-repo"))
}
lr, err := r.LockRO(repo.StorageMiner)
if err != nil {
return fmt.Errorf("locking repo: %w", err)
}
defer func() {
err = lr.Close()
if err != nil {
fmt.Println("error closing repo: ", err)
}
}()
mmeta, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return xerrors.Errorf("opening miner metadata datastore: %w", err)
}
maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address"))
if err != nil {
return xerrors.Errorf("getting miner address datastore entry: %w", err)
}
addr, err := address.NewFromBytes(maddrBytes)
if err != nil {
return xerrors.Errorf("parsing miner actor address: %w", err)
}
unmigSectorShouldFail := func() bool { return !cctx.Bool("seal-ignore") }
err = guidedsetup.MigrateSectors(ctx, addr, mmeta, db, func(n int) {
fmt.Printf("Migrating %d sectors\n", n)
}, unmigSectorShouldFail)
if err != nil {
return xerrors.Errorf("migrating sectors: %w", err)
}
return nil
},
}
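// Usage sketch (repo path illustrative): "curio seal migrate-lm-sectors --miner-repo ~/.lotusminer"
// copies the lotus-miner sector metadata into the Curio database; add --seal-ignore to skip
// sectors that cannot be migrated instead of failing.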

View File

@ -1,204 +0,0 @@
package main
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"os"
"time"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/dline"
curio "github.com/filecoin-project/lotus/curiosrc"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
var testCmd = &cli.Command{
Name: "test",
Usage: "Utility functions for testing",
Subcommands: []*cli.Command{
//provingInfoCmd,
wdPostCmd,
},
Before: func(cctx *cli.Context) error {
return nil
},
}
var wdPostCmd = &cli.Command{
Name: "window-post",
Aliases: []string{"wd", "windowpost", "wdpost"},
Usage: "Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not be sent to the chain.",
Subcommands: []*cli.Command{
wdPostHereCmd,
wdPostTaskCmd,
},
}
// wdPostTaskCmd writes to harmony_task and wdpost_partition_tasks, then waits for the result.
// It is intended to be used to test the windowpost scheduler.
// The end of the compute task puts the task_id onto wdpost_proofs, which is read by the submit task.
// The submit task will not send test tasks to the chain, and instead will write the result to harmony_test.
// The result is read by this command, and printed to stdout.
var wdPostTaskCmd = &cli.Command{
Name: "task",
Aliases: []string{"scheduled", "schedule", "async", "asynchronous"},
Usage: "Test the windowpost scheduler by running it on the next available curio. ",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "deadline",
Usage: "deadline to compute WindowPoSt for ",
Value: 0,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
deps, err := deps.GetDeps(ctx, cctx)
if err != nil {
return xerrors.Errorf("get config: %w", err)
}
ts, err := deps.Full.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("cannot get chainhead %w", err)
}
ht := ts.Height()
// It's not important to be super-accurate as it's only for basic testing.
addr, err := address.NewFromString(deps.Cfg.Addresses[0].MinerAddresses[0])
if err != nil {
return xerrors.Errorf("cannot get miner address %w", err)
}
maddr, err := address.IDFromAddress(addr)
if err != nil {
return xerrors.Errorf("cannot get miner id %w", err)
}
var taskId int64
_, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&taskId)
if err != nil {
log.Error("inserting harmony_task: ", err)
return false, xerrors.Errorf("inserting harmony_task: %w", err)
}
_, err = tx.Exec(`INSERT INTO wdpost_partition_tasks
(task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`,
taskId, maddr, ht, cctx.Uint64("deadline"), 0)
if err != nil {
log.Error("inserting wdpost_partition_tasks: ", err)
return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err)
}
_, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", taskId)
if err != nil {
return false, xerrors.Errorf("inserting into harmony_tests: %w", err)
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return xerrors.Errorf("writing SQL transaction: %w", err)
}
fmt.Printf("Inserted task %v. Waiting for success ", taskId)
var result sql.NullString
for {
time.Sleep(time.Second)
err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, taskId).Scan(&result)
if err != nil {
return xerrors.Errorf("reading result from harmony_test: %w", err)
}
if result.Valid {
break
}
fmt.Print(".")
}
fmt.Println()
log.Infof("Result: %s", result.String)
return nil
},
}
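// Usage sketch (deadline illustrative): "curio test window-post task --deadline 3" inserts a WdPost
// task for the configured miner and polls harmony_test until the proof result is written back.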
// This command is intended to be used to verify PoSt compute performance.
// It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.
// The entire processing happens in this process while you wait. It does not use the scheduler.
var wdPostHereCmd = &cli.Command{
Name: "here",
Aliases: []string{"cli"},
Usage: "Compute WindowPoSt for performance and configuration testing.",
Description: `Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.`,
ArgsUsage: "[deadline index]",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "deadline",
Usage: "deadline to compute WindowPoSt for ",
Value: 0,
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.curio/storage.json",
},
&cli.Uint64Flag{
Name: "partition",
Usage: "partition to compute WindowPoSt for",
Value: 0,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
deps, err := deps.GetDeps(ctx, cctx)
if err != nil {
return err
}
wdPostTask, wdPoStSubmitTask, declareRecoverTask, err := curio.WindowPostScheduler(
ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, nil, nil,
deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks)
if err != nil {
return err
}
_, _ = wdPoStSubmitTask, declareRecoverTask
if len(deps.Maddrs) == 0 {
return errors.New("no miners to compute WindowPoSt for")
}
head, err := deps.Full.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("failed to get chain head: %w", err)
}
di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0)
for maddr := range deps.Maddrs {
out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition"))
if err != nil {
fmt.Println("Error computing WindowPoSt for miner", maddr, err)
continue
}
fmt.Println("Computed WindowPoSt for miner", maddr, ":")
err = json.NewEncoder(os.Stdout).Encode(out)
if err != nil {
fmt.Println("Could not encode WindowPoSt output for miner", maddr, err)
continue
}
}
return nil
},
}

View File

@ -1,339 +0,0 @@
// Package rpc provides all direct access to this node.
package rpc
import (
"context"
"encoding/base64"
"encoding/json"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"time"
"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"go.opencensus.io/tag"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/market"
"github.com/filecoin-project/lotus/curiosrc/web"
"github.com/filecoin-project/lotus/lib/rpcenc"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/metrics/proxy"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
const metaFile = "sectorstore.json"
var log = logging.Logger("curio/rpc")
var permissioned = os.Getenv("LOTUS_DISABLE_AUTH_PERMISSIONED") != "1"
func CurioHandler(
authv func(ctx context.Context, token string) ([]auth.Permission, error),
remote http.HandlerFunc,
a api.Curio,
permissioned bool) http.Handler {
mux := mux.NewRouter()
readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt)
wapi := proxy.MetricedAPI[api.Curio, api.CurioStruct](a)
if permissioned {
wapi = api.PermissionedAPI[api.Curio, api.CurioStruct](wapi)
}
rpcServer.Register("Filecoin", wapi)
rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover")
mux.Handle("/rpc/v0", rpcServer)
mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler)
mux.PathPrefix("/remote").HandlerFunc(remote)
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof
if !permissioned {
return mux
}
ah := &auth.Handler{
Verify: authv,
Next: mux.ServeHTTP,
}
return ah
}
type CurioAPI struct {
*deps.Deps
paths.SectorIndex
ShutdownChan chan struct{}
}
func (p *CurioAPI) Version(context.Context) (api.Version, error) {
return api.CurioAPIVersion0, nil
}
func (p *CurioAPI) StorageDetachLocal(ctx context.Context, path string) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}
// check that we have the path opened
lps, err := p.LocalStore.Local(ctx)
if err != nil {
return xerrors.Errorf("getting local path list: %w", err)
}
var localPath *storiface.StoragePath
for _, lp := range lps {
if lp.LocalPath == path {
lp := lp // copy to make the linter happy
localPath = &lp
break
}
}
if localPath == nil {
return xerrors.Errorf("no local paths match '%s'", path)
}
// drop from the persisted storage.json
var found bool
if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) {
out := make([]storiface.LocalPath, 0, len(sc.StoragePaths))
for _, storagePath := range sc.StoragePaths {
if storagePath.Path != path {
out = append(out, storagePath)
continue
}
found = true
}
sc.StoragePaths = out
}); err != nil {
return xerrors.Errorf("set storage config: %w", err)
}
if !found {
// maybe this is fine?
return xerrors.Errorf("path not found in storage.json")
}
// unregister locally, drop from sector index
return p.LocalStore.ClosePath(ctx, localPath.ID)
}
func (p *CurioAPI) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) {
ps, err := p.LocalStore.Local(ctx)
if err != nil {
return nil, err
}
var out = make(map[storiface.ID]string)
for _, path := range ps {
out[path.ID] = path.LocalPath
}
return out, nil
}
func (p *CurioAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) {
return p.Stor.FsStat(ctx, id)
}
func (p *CurioAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece piece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
di, err := market.NewPieceIngester(ctx, p.Deps.DB, p.Deps.Full, maddr, true, time.Minute)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("failed to create a piece ingestor")
}
sector, err := di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("failed to add piece to a sector")
}
err = di.Seal()
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("failed to start sealing the sector %d for actor %s", sector.Sector, maddr)
}
return sector, nil
}
// Trigger shutdown
func (p *CurioAPI) Shutdown(context.Context) error {
close(p.ShutdownChan)
return nil
}
func (p *CurioAPI) StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}
if err := os.MkdirAll(path, 0755); err != nil {
if !os.IsExist(err) {
return err
}
}
_, err = os.Stat(filepath.Join(path, metaFile))
if !os.IsNotExist(err) {
if err == nil {
return xerrors.Errorf("path is already initialized")
}
return err
}
if opts.ID == "" {
opts.ID = storiface.ID(uuid.New().String())
}
if !(opts.CanStore || opts.CanSeal) {
return xerrors.Errorf("must specify at least one of --store or --seal")
}
b, err := json.MarshalIndent(opts, "", " ")
if err != nil {
return xerrors.Errorf("marshaling storage config: %w", err)
}
if err := os.WriteFile(filepath.Join(path, metaFile), b, 0644); err != nil {
return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(path, metaFile), err)
}
return nil
}
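// StorageAddLocal opens an already initialized storage path and records it in
// the persisted storage config so it is re-attached on restart.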
func (p *CurioAPI) StorageAddLocal(ctx context.Context, path string) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}
if err := p.LocalStore.OpenPath(ctx, path); err != nil {
return xerrors.Errorf("opening local path: %w", err)
}
if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) {
sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: path})
}); err != nil {
return xerrors.Errorf("get storage config: %w", err)
}
return nil
}
func (p *CurioAPI) LogList(ctx context.Context) ([]string, error) {
return logging.GetSubsystems(), nil
}
func (p *CurioAPI) LogSetLevel(ctx context.Context, subsystem, level string) error {
return logging.SetLogLevel(subsystem, level)
}
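// ListenAndServe starts the Curio RPC server (and the web GUI when enabled) on
// the configured listen address, shutting both down when ctx is cancelled.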
func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error {
fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}}
remoteHandler := func(w http.ResponseWriter, r *http.Request) {
if !auth.HasPerm(r.Context(), nil, api.PermAdmin) {
w.WriteHeader(http.StatusUnauthorized)
_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"})
return
}
fh.ServeHTTP(w, r)
}
var authVerify func(context.Context, string) ([]auth.Permission, error)
{
privateKey, err := base64.StdEncoding.DecodeString(dependencies.Cfg.Apis.StorageRPCSecret)
if err != nil {
return xerrors.Errorf("decoding storage rpc secret: %w", err)
}
authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) {
var payload deps.JwtPayload
if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil {
return nil, xerrors.Errorf("JWT Verification failed: %w", err)
}
return payload.Allow, nil
}
}
// Serve the RPC.
srv := &http.Server{
Handler: CurioHandler(
authVerify,
remoteHandler,
&CurioAPI{dependencies, dependencies.Si, shutdownChan},
permissioned),
ReadHeaderTimeout: time.Minute * 3,
BaseContext: func(listener net.Listener) context.Context {
ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker"))
return ctx
},
Addr: dependencies.ListenAddr,
}
log.Infof("Setting up RPC server at %s", dependencies.ListenAddr)
eg := errgroup.Group{}
eg.Go(srv.ListenAndServe)
if dependencies.Cfg.Subsystems.EnableWebGui {
web, err := web.GetSrv(ctx, dependencies)
if err != nil {
return err
}
go func() {
<-ctx.Done()
log.Warn("Shutting down...")
if err := srv.Shutdown(context.TODO()); err != nil {
log.Errorf("shutting down RPC server failed: %s", err)
}
if err := web.Shutdown(context.Background()); err != nil {
log.Errorf("shutting down web server failed: %s", err)
}
log.Warn("Graceful shutdown successful")
}()
uiAddress := dependencies.Cfg.Subsystems.GuiAddress
if uiAddress == "" || uiAddress[0] == ':' {
uiAddress = "localhost" + uiAddress
}
log.Infof("GUI: http://%s", uiAddress)
eg.Go(web.ListenAndServe)
}
return eg.Wait()
}
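// GetCurioAPI dials the Curio RPC endpoint configured for the repo and returns
// a client plus a closer. Typical CLI usage (as in the storage commands below):
//
//	api, closer, err := rpc.GetCurioAPI(cctx)
//	if err != nil {
//		return err
//	}
//	defer closer()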
func GetCurioAPI(ctx *cli.Context) (api.Curio, jsonrpc.ClientCloser, error) {
addr, headers, err := cliutil.GetRawAPI(ctx, repo.Curio, "v0")
if err != nil {
return nil, nil, err
}
u, err := url.Parse(addr)
if err != nil {
return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err)
}
switch u.Scheme {
case "ws":
u.Scheme = "http"
case "wss":
u.Scheme = "https"
}
addr = u.String()
return client.NewCurioRpc(ctx.Context, addr, headers)
}

View File

@ -1,200 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats"
"golang.org/x/xerrors"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/rpc"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/tasks"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/market/lmrpc"
"github.com/filecoin-project/lotus/lib/ulimit"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node"
)
type stackTracer interface {
StackTrace() errors.StackTrace
}
var runCmd = &cli.Command{
Name: "run",
Usage: "Start a Curio process",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "listen",
Usage: "host address and port the worker api will listen on",
Value: "0.0.0.0:12300",
EnvVars: []string{"CURIO_LISTEN"},
},
&cli.StringFlag{
Name: "gui-listen",
Usage: "host address and port the gui will listen on",
Hidden: true,
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
&cli.BoolFlag{
// Declared here because the Action below checks this flag; defaulting to
// true is an assumption (GPU proving stays on unless explicitly disabled).
Name:  "enable-gpu-proving",
Usage: "enable use of GPU for proving",
Value: true,
},
&cli.BoolFlag{
Name: "halt-after-init",
Usage: "only run init, then return",
Hidden: true,
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.curio/storage.json",
},
&cli.StringFlag{
Name: "journal",
Usage: "path to journal files",
Value: "~/.curio/",
},
&cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
EnvVars: []string{"CURIO_LAYERS"},
Aliases: []string{"l", "layer"},
},
},
Action: func(cctx *cli.Context) (err error) {
defer func() {
if err != nil {
if err, ok := err.(stackTracer); ok {
for _, f := range err.StackTrace() {
fmt.Printf("%+s:%d\n", f, f)
}
}
}
}()
if !cctx.Bool("enable-gpu-proving") {
err := os.Setenv("BELLMAN_NO_GPU", "true")
if err != nil {
return err
}
}
if err := os.MkdirAll(os.TempDir(), 0755); err != nil {
log.Errorf("ensuring tempdir exists: %s", err)
}
ctx := lcli.DaemonContext(cctx)
shutdownChan := make(chan struct{})
{
var ctxclose func()
ctx, ctxclose = context.WithCancel(ctx)
go func() {
<-shutdownChan
ctxclose()
}()
}
// Register all metric views
/*
if err := view.Register(
metrics.MinerNodeViews...,
); err != nil {
log.Fatalf("Cannot register the view: %v", err)
}
*/
// Set the metric to one so it is published to the exporter
stats.Record(ctx, metrics.LotusInfo.M(1))
if cctx.Bool("manage-fdlimit") {
if _, _, err := ulimit.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
dependencies := &deps.Deps{}
err = dependencies.PopulateRemainingDeps(ctx, cctx, true)
if err != nil {
return err
}
go ffiSelfTest() // Panics on failure
taskEngine, err := tasks.StartTasks(ctx, dependencies)
if err != nil {
return err
}
defer taskEngine.GracefullyTerminate()
if err := lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Full, dependencies.Cfg); err != nil {
return xerrors.Errorf("starting market RPCs: %w", err)
}
err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown.
if err != nil {
return err
}
finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
//node.ShutdownHandler{Component: "curio", StopFunc: stop},
<-finishCh
return nil
},
}
var layersFlag = &cli.StringSliceFlag{
Name: "layers",
Usage: "list of layers to be interpreted (atop defaults). Default: base",
}
var webCmd = &cli.Command{
Name: "web",
Usage: "Start Curio web interface",
Description: `Start an instance of the Curio web interface.
This creates the 'web' layer if it does not exist, then calls run with that layer.`,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "gui-listen",
Usage: "Address to listen for the GUI on",
Value: "0.0.0.0:4701",
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
layersFlag,
},
Action: func(cctx *cli.Context) error {
db, err := deps.MakeDB(cctx)
if err != nil {
return err
}
webtxt, err := getConfig(db, "web")
if err != nil || webtxt == "" {
s := `[Subsystems]
EnableWebGui = true
`
if err = setConfig(db, "web", s); err != nil {
return err
}
}
layers := append([]string{"web"}, cctx.StringSlice("layers")...)
err = cctx.Set("layers", strings.Join(layers, ","))
if err != nil {
return err
}
return runCmd.Action(cctx)
},
}

View File

@ -1,30 +0,0 @@
package main
import (
_ "net/http/pprof"
"github.com/urfave/cli/v2"
lcli "github.com/filecoin-project/lotus/cli"
)
var stopCmd = &cli.Command{
Name: "stop",
Usage: "Stop a running Curio process",
Flags: []cli.Flag{},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetAPI(cctx)
if err != nil {
return err
}
defer closer()
err = api.Shutdown(lcli.ReqContext(cctx))
if err != nil {
return err
}
return nil
},
}

View File

@ -1,499 +0,0 @@
package main
import (
"fmt"
"math/bits"
"sort"
"strconv"
"strings"
"time"
"github.com/docker/go-units"
"github.com/fatih/color"
"github.com/google/uuid"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/rpc"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var storageCmd = &cli.Command{
Name: "storage",
Usage: "manage sector storage",
Description: `Sectors can be stored across many filesystem paths. These
commands provide ways to manage the storage the miner will use to store sectors
long term for proving (referenced as 'store') as well as how sectors will be
stored while moving through the sealing pipeline (referenced as 'seal').`,
Subcommands: []*cli.Command{
storageAttachCmd,
storageDetachCmd,
storageListCmd,
storageFindCmd,
/*storageDetachCmd,
storageRedeclareCmd,
storageCleanupCmd,
storageLocks,*/
},
}
var storageAttachCmd = &cli.Command{
Name: "attach",
Usage: "attach local storage path",
ArgsUsage: "[path]",
Description: `Storage can be attached to the miner using this command. The storage volume
list is stored locally in the storage.json file set in 'curio run'. We do not
recommend manually modifying this file without further understanding of the
storage system.
Each storage volume contains a configuration file which describes the
capabilities of the volume. When the '--init' flag is provided, this file will
be created using the additional flags.
Weight
A high weight value means data will be more likely to be stored in this path
Seal
Data for the sealing process will be stored here
Store
Finalized sectors that will be moved here for long term storage and be proven
over time
`,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "init",
Usage: "initialize the path first",
},
&cli.Uint64Flag{
Name: "weight",
Usage: "(for init) path weight",
Value: 10,
},
&cli.BoolFlag{
Name: "seal",
Usage: "(for init) use path for sealing",
},
&cli.BoolFlag{
Name: "store",
Usage: "(for init) use path for long-term storage",
},
&cli.StringFlag{
Name: "max-storage",
Usage: "(for init) limit storage space for sectors (expensive for very large paths!)",
},
&cli.StringSliceFlag{
Name: "groups",
Usage: "path group names",
},
&cli.StringSliceFlag{
Name: "allow-to",
Usage: "path groups allowed to pull data from this path (allow all if not specified)",
},
},
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}
p, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expanding path: %w", err)
}
if cctx.Bool("init") {
var maxStor int64
if cctx.IsSet("max-storage") {
maxStor, err = units.RAMInBytes(cctx.String("max-storage"))
if err != nil {
return xerrors.Errorf("parsing max-storage: %w", err)
}
}
cfg := storiface.LocalStorageMeta{
ID: storiface.ID(uuid.New().String()),
Weight: cctx.Uint64("weight"),
CanSeal: cctx.Bool("seal"),
CanStore: cctx.Bool("store"),
MaxStorage: uint64(maxStor),
Groups: cctx.StringSlice("groups"),
AllowTo: cctx.StringSlice("allow-to"),
}
if !(cfg.CanStore || cfg.CanSeal) {
return xerrors.Errorf("must specify at least one of --store or --seal")
}
if err := minerApi.StorageInit(ctx, p, cfg); err != nil {
return xerrors.Errorf("init storage: %w", err)
}
}
return minerApi.StorageAddLocal(ctx, p)
},
}
var storageDetachCmd = &cli.Command{
Name: "detach",
Usage: "detach local storage path",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "really-do-it",
},
},
ArgsUsage: "[path]",
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
}
p, err := homedir.Expand(cctx.Args().First())
if err != nil {
return xerrors.Errorf("expanding path: %w", err)
}
if !cctx.Bool("really-do-it") {
return xerrors.Errorf("pass --really-do-it to execute the action")
}
return minerApi.StorageDetachLocal(ctx, p)
},
}
var storageListCmd = &cli.Command{
Name: "list",
Usage: "list local storage paths",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "local",
Usage: "only list local storage paths",
},
},
Subcommands: []*cli.Command{
//storageListSectorsCmd,
},
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
st, err := minerApi.StorageList(ctx)
if err != nil {
return err
}
local, err := minerApi.StorageLocal(ctx)
if err != nil {
return err
}
type fsInfo struct {
storiface.ID
sectors []storiface.Decl
stat fsutil.FsStat
}
sorted := make([]fsInfo, 0, len(st))
for id, decls := range st {
if cctx.Bool("local") {
if _, ok := local[id]; !ok {
continue
}
}
st, err := minerApi.StorageStat(ctx, id)
if err != nil {
sorted = append(sorted, fsInfo{ID: id, sectors: decls})
continue
}
sorted = append(sorted, fsInfo{id, decls, st})
}
sort.Slice(sorted, func(i, j int) bool {
if sorted[i].stat.Capacity != sorted[j].stat.Capacity {
return sorted[i].stat.Capacity > sorted[j].stat.Capacity
}
return sorted[i].ID < sorted[j].ID
})
for _, s := range sorted {
var cnt [5]int
for _, decl := range s.sectors {
for i := range cnt {
if decl.SectorFileType&(1<<i) != 0 {
cnt[i]++
}
}
}
fmt.Printf("%s:\n", s.ID)
pingStart := time.Now()
st, err := minerApi.StorageStat(ctx, s.ID)
if err != nil {
fmt.Printf("\t%s: %s:\n", color.RedString("Error"), err)
continue
}
ping := time.Since(pingStart)
safeRepeat := func(s string, count int) string {
if count < 0 {
return ""
}
return strings.Repeat(s, count)
}
var barCols = int64(50)
// filesystem use bar
{
usedPercent := (st.Capacity - st.FSAvailable) * 100 / st.Capacity
percCol := color.FgGreen
switch {
case usedPercent > 98:
percCol = color.FgRed
case usedPercent > 90:
percCol = color.FgYellow
}
set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity
used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity
reserved := set - used
bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))
desc := ""
if st.Max > 0 {
desc = " (filesystem)"
}
fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar),
types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))),
types.SizeStr(types.NewInt(uint64(st.Capacity))),
color.New(percCol).Sprintf("%d%%", usedPercent), desc)
}
// optional configured limit bar
if st.Max > 0 {
usedPercent := st.Used * 100 / st.Max
percCol := color.FgGreen
switch {
case usedPercent > 98:
percCol = color.FgRed
case usedPercent > 90:
percCol = color.FgYellow
}
set := st.Used * barCols / st.Max
used := (st.Used + st.Reserved) * barCols / st.Max
reserved := set - used
bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set))
fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar),
types.SizeStr(types.NewInt(uint64(st.Used))),
types.SizeStr(types.NewInt(uint64(st.Max))),
color.New(percCol).Sprintf("%d%%", usedPercent))
}
fmt.Printf("\t%s; %s; %s; %s; %s; Reserved: %s\n",
color.YellowString("Unsealed: %d", cnt[0]),
color.GreenString("Sealed: %d", cnt[1]),
color.BlueString("Caches: %d", cnt[2]),
color.GreenString("Updated: %d", cnt[3]),
color.BlueString("Update-caches: %d", cnt[4]),
types.SizeStr(types.NewInt(uint64(st.Reserved))))
si, err := minerApi.StorageInfo(ctx, s.ID)
if err != nil {
return err
}
fmt.Print("\t")
if si.CanSeal || si.CanStore {
fmt.Printf("Weight: %d; Use: ", si.Weight)
if si.CanSeal {
fmt.Print(color.MagentaString("Seal "))
}
if si.CanStore {
fmt.Print(color.CyanString("Store"))
}
} else {
fmt.Print(color.HiYellowString("Use: ReadOnly"))
}
fmt.Println()
if len(si.Groups) > 0 {
fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", "))
}
if len(si.AllowTo) > 0 {
fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", "))
}
if len(si.AllowTypes) > 0 || len(si.DenyTypes) > 0 {
denied := storiface.FTAll.SubAllowed(si.AllowTypes, si.DenyTypes)
allowed := storiface.FTAll ^ denied
switch {
case bits.OnesCount64(uint64(allowed)) == 0:
fmt.Printf("\tAllow Types: %s\n", color.RedString("None"))
case bits.OnesCount64(uint64(allowed)) < bits.OnesCount64(uint64(denied)):
fmt.Printf("\tAllow Types: %s\n", color.GreenString(strings.Join(allowed.Strings(), " ")))
default:
fmt.Printf("\tDeny Types: %s\n", color.RedString(strings.Join(denied.Strings(), " ")))
}
}
if localPath, ok := local[s.ID]; ok {
fmt.Printf("\tLocal: %s\n", color.GreenString(localPath))
}
for i, l := range si.URLs {
var rtt string
if _, ok := local[s.ID]; !ok && i == 0 {
rtt = " (latency: " + ping.Truncate(time.Microsecond*100).String() + ")"
}
fmt.Printf("\tURL: %s%s\n", l, rtt) // TODO; try pinging maybe?? print latency?
}
fmt.Println()
}
return nil
},
}
type storedSector struct {
id storiface.ID
store storiface.SectorStorageInfo
types map[storiface.SectorFileType]bool
}
var storageFindCmd = &cli.Command{
Name: "find",
Usage: "find sector in the storage system",
ArgsUsage: "[miner address] [sector number]",
Action: func(cctx *cli.Context) error {
minerApi, closer, err := rpc.GetCurioAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
if cctx.NArg() != 2 {
return lcli.IncorrectNumArgs(cctx)
}
maddr := cctx.Args().First()
ma, err := address.NewFromString(maddr)
if err != nil {
return xerrors.Errorf("parsing miner address: %w", err)
}
mid, err := address.IDFromAddress(ma)
if err != nil {
return err
}
if !cctx.Args().Present() {
return xerrors.New("Usage: lotus-miner storage find [sector number]")
}
snum, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
if err != nil {
return err
}
sid := abi.SectorID{
Miner: abi.ActorID(mid),
Number: abi.SectorNumber(snum),
}
sectorTypes := []storiface.SectorFileType{
storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache,
}
byId := make(map[storiface.ID]*storedSector)
for _, sectorType := range sectorTypes {
infos, err := minerApi.StorageFindSector(ctx, sid, sectorType, 0, false)
if err != nil {
return xerrors.Errorf("finding sector type %d: %w", sectorType, err)
}
for _, info := range infos {
sts, ok := byId[info.ID]
if !ok {
sts = &storedSector{
id: info.ID,
store: info,
types: make(map[storiface.SectorFileType]bool),
}
byId[info.ID] = sts
}
sts.types[sectorType] = true
}
}
local, err := minerApi.StorageLocal(ctx)
if err != nil {
return err
}
var out []*storedSector
for _, sector := range byId {
out = append(out, sector)
}
sort.Slice(out, func(i, j int) bool {
return out[i].id < out[j].id
})
for _, info := range out {
var types []string
for sectorType, present := range info.types {
if present {
types = append(types, sectorType.String())
}
}
sort.Strings(types) // Optional: Sort types for consistent output
fmt.Printf("In %s (%s)\n", info.id, strings.Join(types, ", "))
fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore)
if localPath, ok := local[info.id]; ok {
fmt.Printf("\tLocal (%s)\n", localPath)
} else {
fmt.Printf("\tRemote\n")
}
for _, l := range info.store.URLs {
fmt.Printf("\tURL: %s\n", l)
}
}
return nil
},
}

View File

@ -1,248 +0,0 @@
// Package tasks contains tasks that can be run by the curio command.
package tasks
import (
"context"
"sort"
"strings"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/samber/lo"
"golang.org/x/exp/maps"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
curio "github.com/filecoin-project/lotus/curiosrc"
"github.com/filecoin-project/lotus/curiosrc/alertmanager"
"github.com/filecoin-project/lotus/curiosrc/chainsched"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/gc"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/message"
"github.com/filecoin-project/lotus/curiosrc/piece"
"github.com/filecoin-project/lotus/curiosrc/seal"
"github.com/filecoin-project/lotus/curiosrc/winning"
"github.com/filecoin-project/lotus/lib/lazy"
"github.com/filecoin-project/lotus/lib/must"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
var log = logging.Logger("curio/deps")
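// StartTasks builds the list of harmony tasks enabled by the configured
// subsystems (PoSt, piece handling, sealing, storage GC, alerting), fetches
// proof parameters when needed, and starts the task engine. Tasks are
// collected in pipeline order and reversed before registration because
// harmony treats the first task as the highest priority.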
func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.TaskEngine, error) {
cfg := dependencies.Cfg
db := dependencies.DB
full := dependencies.Full
verif := dependencies.Verif
as := dependencies.As
maddrs := dependencies.Maddrs
stor := dependencies.Stor
lstor := dependencies.LocalStore
si := dependencies.Si
var activeTasks []harmonytask.TaskInterface
sender, sendTask := message.NewSender(full, full, db)
activeTasks = append(activeTasks, sendTask)
chainSched := chainsched.New(full)
var needProofParams bool
///////////////////////////////////////////////////////////////////////
///// Task Selection
///////////////////////////////////////////////////////////////////////
{
// PoSt
if cfg.Subsystems.EnableWindowPost {
wdPostTask, wdPoStSubmitTask, declareRecoverTask, err := curio.WindowPostScheduler(
ctx, cfg.Fees, cfg.Proving, full, verif, sender, chainSched,
as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks)
if err != nil {
return nil, err
}
activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, declareRecoverTask)
needProofParams = true
}
if cfg.Subsystems.EnableWinningPost {
pl := dependencies.LocalStore
winPoStTask := winning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, pl, verif, full, maddrs)
activeTasks = append(activeTasks, winPoStTask)
needProofParams = true
}
}
slrLazy := lazy.MakeLazy(func() (*ffi.SealCalls, error) {
return ffi.NewSealCalls(stor, lstor, si), nil
})
{
// Piece handling
if cfg.Subsystems.EnableParkPiece {
parkPieceTask, err := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks)
if err != nil {
return nil, err
}
cleanupPieceTask := piece.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0)
activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask)
}
}
hasAnySealingTask := cfg.Subsystems.EnableSealSDR ||
cfg.Subsystems.EnableSealSDRTrees ||
cfg.Subsystems.EnableSendPrecommitMsg ||
cfg.Subsystems.EnablePoRepProof ||
cfg.Subsystems.EnableMoveStorage ||
cfg.Subsystems.EnableSendCommitMsg
{
// Sealing
var sp *seal.SealPoller
var slr *ffi.SealCalls
if hasAnySealingTask {
sp = seal.NewPoller(db, full)
go sp.RunPoller(ctx)
slr = must.One(slrLazy.Val())
}
// NOTE: Tasks with the LEAST priority are at the top
if cfg.Subsystems.EnableSealSDR {
sdrTask := seal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks)
activeTasks = append(activeTasks, sdrTask)
}
if cfg.Subsystems.EnableSealSDRTrees {
treeDTask := seal.NewTreeDTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks)
treeRCTask := seal.NewTreeRCTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks)
finalizeTask := seal.NewFinalizeTask(cfg.Subsystems.FinalizeMaxTasks, sp, slr, db)
activeTasks = append(activeTasks, treeDTask, treeRCTask, finalizeTask)
}
if cfg.Subsystems.EnableSendPrecommitMsg {
precommitTask := seal.NewSubmitPrecommitTask(sp, db, full, sender, as, cfg.Fees.MaxPreCommitGasFee)
activeTasks = append(activeTasks, precommitTask)
}
if cfg.Subsystems.EnablePoRepProof {
porepTask := seal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks)
activeTasks = append(activeTasks, porepTask)
needProofParams = true
}
if cfg.Subsystems.EnableMoveStorage {
moveStorageTask := seal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks)
activeTasks = append(activeTasks, moveStorageTask)
}
if cfg.Subsystems.EnableSendCommitMsg {
commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg)
activeTasks = append(activeTasks, commitTask)
}
}
if hasAnySealingTask {
// Sealing nodes maintain storage index when bored
storageEndpointGcTask := gc.NewStorageEndpointGC(si, stor, db)
activeTasks = append(activeTasks, storageEndpointGcTask)
}
amTask := alertmanager.NewAlertTask(full, db, cfg.Alerting)
activeTasks = append(activeTasks, amTask)
if needProofParams {
for spt := range dependencies.ProofTypes {
if err := modules.GetParams(true)(spt); err != nil {
return nil, xerrors.Errorf("getting params: %w", err)
}
}
}
minerAddresses := make([]string, 0, len(maddrs))
for k := range maddrs {
minerAddresses = append(minerAddresses, address.Address(k).String())
}
log.Infow("This Curio instance handles",
"miner_addresses", minerAddresses,
"tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name }))
// harmony treats the first task as highest priority, so reverse the order
// (we could have just appended to this list in the reverse order, but defining
// tasks in pipeline order is more intuitive)
activeTasks = lo.Reverse(activeTasks)
ht, err := harmonytask.New(db, activeTasks, dependencies.ListenAddr)
if err != nil {
return nil, err
}
go machineDetails(dependencies, activeTasks, ht.ResourcesAvailable().MachineID)
if hasAnySealingTask {
watcher, err := message.NewMessageWatcher(db, ht, chainSched, full)
if err != nil {
return nil, err
}
_ = watcher
}
if cfg.Subsystems.EnableWindowPost || hasAnySealingTask {
go chainSched.Run(ctx)
}
return ht, nil
}
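// machineDetails records this machine's tasks, layers and miners in
// harmony_machine_details, then warns when no machine in the cluster appears
// to be running WdPost/WinPost for one of our miners.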
func machineDetails(deps *deps.Deps, activeTasks []harmonytask.TaskInterface, machineID int) {
taskNames := lo.Map(activeTasks, func(item harmonytask.TaskInterface, _ int) string {
return item.TypeDetails().Name
})
miners := lo.Map(maps.Keys(deps.Maddrs), func(item dtypes.MinerAddress, _ int) string {
return address.Address(item).String()
})
sort.Strings(miners)
_, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details
(tasks, layers, startup_time, miners, machine_id) VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (machine_id) DO UPDATE SET tasks=$1, layers=$2, startup_time=$3, miners=$4`,
strings.Join(taskNames, ","), strings.Join(deps.Layers, ","),
time.Now(), strings.Join(miners, ","), machineID)
if err != nil {
log.Errorf("failed to update machine details: %s", err)
return
}
// maybePostWarning
if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") {
// Maybe we aren't running a PoSt for these miners?
var allMachines []struct {
MachineID int `db:"machine_id"`
Miners string `db:"miners"`
Tasks string `db:"tasks"`
}
err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`)
if err != nil {
log.Errorf("failed to get machine details: %s", err)
return
}
for _, miner := range miners {
var myPostIsHandled bool
for _, m := range allMachines {
if !lo.Contains(strings.Split(m.Miners, ","), miner) {
continue
}
if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") {
myPostIsHandled = true
break
}
}
if !myPostIsHandled {
log.Errorf("No PoSt tasks are running for miner %s. Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner)
}
}
}
}

View File

@ -1,141 +0,0 @@
package main
import (
"fmt"
"os"
"strings"
"github.com/fatih/color"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-address"
builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/cli/spcli"
"github.com/filecoin-project/lotus/lib/tablewriter"
)
var actorCmd = &cli.Command{
Name: "actor",
Usage: "Manage Filecoin Miner Actor Metadata",
Subcommands: []*cli.Command{
spcli.ActorSetAddrsCmd(SPTActorGetter),
spcli.ActorWithdrawCmd(SPTActorGetter),
spcli.ActorRepayDebtCmd(SPTActorGetter),
spcli.ActorSetPeeridCmd(SPTActorGetter),
spcli.ActorSetOwnerCmd(SPTActorGetter),
spcli.ActorControlCmd(SPTActorGetter, actorControlListCmd(SPTActorGetter)),
spcli.ActorProposeChangeWorkerCmd(SPTActorGetter),
spcli.ActorConfirmChangeWorkerCmd(SPTActorGetter),
spcli.ActorCompactAllocatedCmd(SPTActorGetter),
spcli.ActorProposeChangeBeneficiaryCmd(SPTActorGetter),
spcli.ActorConfirmChangeBeneficiaryCmd(SPTActorGetter),
spcli.ActorNewMinerCmd,
},
}
func actorControlListCmd(getActor spcli.ActorAddressGetter) *cli.Command {
return &cli.Command{
Name: "list",
Usage: "Get currently set control addresses. Note: This excludes most roles as they are not known to the immediate chain state.",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
},
},
Action: func(cctx *cli.Context) error {
api, acloser, err := lcli.GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := getActor(cctx)
if err != nil {
return err
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
tw := tablewriter.New(
tablewriter.Col("name"),
tablewriter.Col("ID"),
tablewriter.Col("key"),
tablewriter.Col("use"),
tablewriter.Col("balance"),
)
post := map[address.Address]struct{}{}
for _, ca := range mi.ControlAddresses {
post[ca] = struct{}{}
}
printKey := func(name string, a address.Address) {
var actor *types.Actor
if actor, err = api.StateGetActor(ctx, a, types.EmptyTSK); err != nil {
fmt.Printf("%s\t%s: error getting actor: %s\n", name, a, err)
return
}
b := actor.Balance
var k = a
// 'a' may be a 'robust' address; in that case, 'StateAccountKey' returns an error.
if builtin2.IsAccountActor(actor.Code) {
if k, err = api.StateAccountKey(ctx, a, types.EmptyTSK); err != nil {
fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err)
return
}
}
kstr := k.String()
if !cctx.Bool("verbose") {
if len(kstr) > 9 {
kstr = kstr[:6] + "..."
}
}
bstr := types.FIL(b).String()
switch {
case b.LessThan(types.FromFil(10)):
bstr = color.RedString(bstr)
case b.LessThan(types.FromFil(50)):
bstr = color.YellowString(bstr)
default:
bstr = color.GreenString(bstr)
}
var uses []string
if a == mi.Worker {
uses = append(uses, color.YellowString("other"))
}
if _, ok := post[a]; ok {
uses = append(uses, color.GreenString("post"))
}
tw.Write(map[string]interface{}{
"name": name,
"ID": a,
"key": kstr,
"use": strings.Join(uses, " "),
"balance": bstr,
})
}
printKey("owner", mi.Owner)
printKey("worker", mi.Worker)
printKey("beneficiary", mi.Beneficiary)
for i, ca := range mi.ControlAddresses {
printKey(fmt.Sprintf("control-%d", i), ca)
}
return tw.Flush(os.Stdout)
},
}
}

View File

@ -1,84 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
logging "github.com/ipfs/go-log/v2"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/cli/spcli"
)
var log = logging.Logger("sptool")
func main() {
local := []*cli.Command{
actorCmd,
spcli.InfoCmd(SPTActorGetter),
sectorsCmd,
provingCmd,
//multiSigCmd,
}
app := &cli.App{
Name: "sptool",
Usage: "Manage Filecoin Miner Actor",
Version: build.UserVersion(),
Commands: local,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
},
&cli.StringFlag{
Name: "log-level",
Value: "info",
},
&cli.StringFlag{
Name: "actor",
Required: os.Getenv("LOTUS_DOCS_GENERATION") != "1",
Usage: "miner actor to manage",
EnvVars: []string{"SP_ADDRESS"},
},
},
Before: func(cctx *cli.Context) error {
return logging.SetLogLevel("sptool", cctx.String("sptool"))
},
}
// terminate early on ctrl+c
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
ctx, cancel := context.WithCancel(context.Background())
go func() {
<-c
cancel()
fmt.Println("Received interrupt, shutting down... Press CTRL+C again to force shutdown")
<-c
fmt.Println("Forcing stop")
os.Exit(1)
}()
if err := app.RunContext(ctx, os.Args); err != nil {
log.Errorf("%+v", err)
os.Exit(1)
return
}
}
func SPTActorGetter(cctx *cli.Context) (address.Address, error) {
addr, err := address.NewFromString(cctx.String("actor"))
if err != nil {
return address.Undef, fmt.Errorf("parsing address: %w", err)
}
return addr, nil
}

View File

@ -1,18 +0,0 @@
package main
import (
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/cli/spcli"
)
var provingCmd = &cli.Command{
Name: "proving",
Usage: "View proving information",
Subcommands: []*cli.Command{
spcli.ProvingInfoCmd(SPTActorGetter),
spcli.ProvingDeadlinesCmd(SPTActorGetter),
spcli.ProvingDeadlineInfoCmd(SPTActorGetter),
spcli.ProvingFaultsCmd(SPTActorGetter),
},
}

View File

@ -1,355 +0,0 @@
package main
import (
"fmt"
"os"
"sort"
"github.com/docker/go-units"
"github.com/fatih/color"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/lotus/cli/spcli"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/lib/tablewriter"
)
var sectorsCmd = &cli.Command{
Name: "sectors",
Usage: "interact with sector store",
Subcommands: []*cli.Command{
spcli.SectorsStatusCmd(SPTActorGetter, nil),
sectorsListCmd, // in-house b/c chain-only is so different. Needs Curio *web* implementation
spcli.SectorPreCommitsCmd(SPTActorGetter),
spcli.SectorsCheckExpireCmd(SPTActorGetter),
sectorsExpiredCmd, // in-house b/c chain-only is so different
spcli.SectorsExtendCmd(SPTActorGetter),
spcli.TerminateSectorCmd(SPTActorGetter),
spcli.SectorsCompactPartitionsCmd(SPTActorGetter),
}}
var sectorsExpiredCmd = &cli.Command{
Name: "expired",
Usage: "Get or cleanup expired sectors",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "expired-epoch",
Usage: "epoch at which to check sector expirations",
DefaultText: "WinningPoSt lookback epoch",
},
},
Action: func(cctx *cli.Context) error {
fullApi, nCloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return xerrors.Errorf("getting fullnode api: %w", err)
}
defer nCloser()
ctx := lcli.ReqContext(cctx)
head, err := fullApi.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
lbEpoch := abi.ChainEpoch(cctx.Int64("expired-epoch"))
if !cctx.IsSet("expired-epoch") {
nv, err := fullApi.StateNetworkVersion(ctx, head.Key())
if err != nil {
return xerrors.Errorf("getting network version: %w", err)
}
lbEpoch = head.Height() - policy.GetWinningPoStSectorSetLookback(nv)
if lbEpoch < 0 {
return xerrors.Errorf("too early to terminate sectors")
}
}
if cctx.IsSet("confirm-remove-count") && !cctx.IsSet("expired-epoch") {
return xerrors.Errorf("--expired-epoch must be specified with --confirm-remove-count")
}
lbts, err := fullApi.ChainGetTipSetByHeight(ctx, lbEpoch, head.Key())
if err != nil {
return xerrors.Errorf("getting lookback tipset: %w", err)
}
maddr, err := SPTActorGetter(cctx)
if err != nil {
return xerrors.Errorf("getting actor address: %w", err)
}
// toCheck is a working bitfield which will only contain terminated sectors
toCheck := bitfield.New()
{
sectors, err := fullApi.StateMinerSectors(ctx, maddr, nil, lbts.Key())
if err != nil {
return xerrors.Errorf("getting sector on chain info: %w", err)
}
for _, sector := range sectors {
if sector.Expiration <= lbts.Height() {
toCheck.Set(uint64(sector.SectorNumber))
}
}
}
mact, err := fullApi.StateGetActor(ctx, maddr, lbts.Key())
if err != nil {
return err
}
tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory())
mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact)
if err != nil {
return err
}
alloc, err := mas.GetAllocatedSectors()
if err != nil {
return xerrors.Errorf("getting allocated sectors: %w", err)
}
// Only allocated sectors can be expired.
toCheck, err = bitfield.IntersectBitField(toCheck, *alloc)
if err != nil {
return xerrors.Errorf("intersecting bitfields: %w", err)
}
if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
live, err := part.LiveSectors()
if err != nil {
return err
}
toCheck, err = bitfield.SubtractBitField(toCheck, live)
if err != nil {
return err
}
unproven, err := part.UnprovenSectors()
if err != nil {
return err
}
toCheck, err = bitfield.SubtractBitField(toCheck, unproven)
return err
})
}); err != nil {
return err
}
err = mas.ForEachPrecommittedSector(func(pci miner.SectorPreCommitOnChainInfo) error {
toCheck.Unset(uint64(pci.Info.SectorNumber))
return nil
})
if err != nil {
return err
}
// toCheck now only contains sectors which either failed to precommit or are expired/terminated
fmt.Printf("Sectors that either failed to precommit or are expired/terminated:\n")
err = toCheck.ForEach(func(u uint64) error {
fmt.Println(abi.SectorNumber(u))
return nil
})
if err != nil {
return err
}
return nil
},
}
var sectorsListCmd = &cli.Command{
Name: "list",
Usage: "List sectors",
Flags: []cli.Flag{
/*
&cli.BoolFlag{
Name: "show-removed",
Usage: "show removed sectors",
Aliases: []string{"r"},
},
&cli.BoolFlag{
Name: "fast",
Usage: "don't show on-chain info for better performance",
Aliases: []string{"f"},
},
&cli.BoolFlag{
Name: "events",
Usage: "display number of events the sector has received",
Aliases: []string{"e"},
},
&cli.BoolFlag{
Name: "initial-pledge",
Usage: "display initial pledge",
Aliases: []string{"p"},
},
&cli.BoolFlag{
Name: "seal-time",
Usage: "display how long it took for the sector to be sealed",
Aliases: []string{"t"},
},
&cli.StringFlag{
Name: "states",
Usage: "filter sectors by a comma-separated list of states",
},
&cli.BoolFlag{
Name: "unproven",
Usage: "only show sectors which aren't in the 'Proving' state",
Aliases: []string{"u"},
},
*/
},
Subcommands: []*cli.Command{
//sectorsListUpgradeBoundsCmd,
},
Action: func(cctx *cli.Context) error {
fullApi, closer2, err := lcli.GetFullNodeAPI(cctx) // TODO: consider storing full node address in config
if err != nil {
return err
}
defer closer2()
ctx := lcli.ReqContext(cctx)
maddr, err := SPTActorGetter(cctx)
if err != nil {
return err
}
head, err := fullApi.ChainHead(ctx)
if err != nil {
return err
}
activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, head.Key())
if err != nil {
return err
}
activeIDs := make(map[abi.SectorNumber]struct{}, len(activeSet))
for _, info := range activeSet {
activeIDs[info.SectorNumber] = struct{}{}
}
sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, head.Key())
if err != nil {
return err
}
commitedIDs := make(map[abi.SectorNumber]struct{}, len(sset))
for _, info := range sset {
commitedIDs[info.SectorNumber] = struct{}{}
}
sort.Slice(sset, func(i, j int) bool {
return sset[i].SectorNumber < sset[j].SectorNumber
})
tw := tablewriter.New(
tablewriter.Col("ID"),
tablewriter.Col("State"),
tablewriter.Col("OnChain"),
tablewriter.Col("Active"),
tablewriter.Col("Expiration"),
tablewriter.Col("SealTime"),
tablewriter.Col("Events"),
tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"),
tablewriter.Col("VerifiedPower"),
tablewriter.Col("Pledge"),
tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("RecoveryTimeout"))
fast := cctx.Bool("fast")
for _, st := range sset {
s := st.SectorNumber
_, inSSet := commitedIDs[s]
_, inASet := activeIDs[s]
const verifiedPowerGainMul = 9
dw, vp := .0, .0
{
rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
}
var deals int
for _, deal := range st.DealIDs {
if deal != 0 {
deals++
}
}
exp := st.Expiration
// if st.OnTime > 0 && st.OnTime < exp {
// exp = st.OnTime // Can be different when the sector was CC upgraded
// }
m := map[string]interface{}{
"ID": s,
//"State": color.New(spcli.StateOrder[sealing.SectorState(st.State)].Col).Sprint(st.State),
"OnChain": yesno(inSSet),
"Active": yesno(inASet),
}
if deals > 0 {
m["Deals"] = color.GreenString("%d", deals)
} else {
m["Deals"] = color.BlueString("CC")
// if st.ToUpgrade {
// m["Deals"] = color.CyanString("CC(upgrade)")
// }
}
if !fast {
if !inSSet {
m["Expiration"] = "n/a"
} else {
m["Expiration"] = cliutil.EpochTime(head.Height(), exp)
// if st.Early > 0 {
// m["RecoveryTimeout"] = color.YellowString(cliutil.EpochTime(head.Height(), st.Early))
// }
}
if inSSet && cctx.Bool("initial-pledge") {
m["Pledge"] = types.FIL(st.InitialPledge).Short()
}
}
if !fast && deals > 0 {
m["DealWeight"] = units.BytesSize(dw)
if vp > 0 {
m["VerifiedPower"] = color.GreenString(units.BytesSize(vp))
}
}
tw.Write(m)
}
return tw.Flush(os.Stdout)
},
}
func yesno(b bool) string {
if b {
return color.GreenString("YES")
}
return color.RedString("NO")
}

View File

@ -1,94 +0,0 @@
package deps
import (
"fmt"
"net/http"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/api/v1api"
cliutil "github.com/filecoin-project/lotus/cli/util"
)
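// getFullNodeAPIV1Curio connects to the configured lotus full node(s) and
// returns a proxied v1 FullNode API; when several nodes are configured, at
// least two of them must be reachable.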
func getFullNodeAPIV1Curio(ctx *cli.Context, ainfoCfg []string, opts ...cliutil.GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) {
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
return tn.(v1api.FullNode), func() {}, nil
}
var options cliutil.GetFullNodeOptions
for _, opt := range opts {
opt(&options)
}
var rpcOpts []jsonrpc.Option
if options.EthSubHandler != nil {
rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.EthSubHandler), jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription"))
}
var httpHeads []httpHead
version := "v1"
{
if len(ainfoCfg) == 0 {
return nil, nil, xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'")
}
for _, i := range ainfoCfg {
ainfo := cliutil.ParseApiInfo(i)
addr, err := ainfo.DialArgs(version)
if err != nil {
return nil, nil, xerrors.Errorf("could not get DialArgs: %w", err)
}
httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()})
}
}
if cliutil.IsVeryVerbose {
_, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", httpHeads[0].addr)
}
var fullNodes []api.FullNode
var closers []jsonrpc.ClientCloser
for _, head := range httpHeads {
v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...)
if err != nil {
log.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error())
continue
}
fullNodes = append(fullNodes, v1api)
closers = append(closers, closer)
}
// When running in cluster mode and trying to establish connections to multiple nodes, fail
// if less than 2 lotus nodes are actually running
if len(httpHeads) > 1 && len(fullNodes) < 2 {
return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node")
}
finalCloser := func() {
for _, c := range closers {
c()
}
}
var v1API api.FullNodeStruct
cliutil.FullNodeProxy(fullNodes, &v1API)
v, err := v1API.Version(ctx.Context)
if err != nil {
return nil, nil, err
}
if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) {
return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion)
}
return &v1API, finalCloser, nil
}
type httpHead struct {
addr string
header http.Header
}

View File

@ -1,529 +0,0 @@
// Package deps provides the dependencies for the curio node.
package deps
import (
"context"
"crypto/rand"
"database/sql"
"encoding/base64"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/gbrlsnchs/jwt/v3"
logging "github.com/ipfs/go-log/v2"
"github.com/samber/lo"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
curio "github.com/filecoin-project/lotus/curiosrc"
"github.com/filecoin-project/lotus/curiosrc/multictladdr"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/journal/alerting"
"github.com/filecoin-project/lotus/journal/fsjournal"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("curio/deps")
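// MakeDB builds a HarmonyDB connection, trying in order: CLI flags, the miner
// config pointed to by LOTUS_MINER_PATH, ~/.lotusminer/config.toml, and the
// CURIO_DB connection string.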
func MakeDB(cctx *cli.Context) (*harmonydb.DB, error) {
// #1 CLI opts
fromCLI := func() (*harmonydb.DB, error) {
dbConfig := config.HarmonyDB{
Username: cctx.String("db-user"),
Password: cctx.String("db-password"),
Hosts: strings.Split(cctx.String("db-host"), ","),
Database: cctx.String("db-name"),
Port: cctx.String("db-port"),
}
return harmonydb.NewFromConfig(dbConfig)
}
readToml := func(path string) (*harmonydb.DB, error) {
cfg, err := config.FromFile(path)
if err != nil {
return nil, err
}
if c, ok := cfg.(*config.StorageMiner); ok {
return harmonydb.NewFromConfig(c.HarmonyDB)
}
return nil, errors.New("not a miner config")
}
// #2 Try local miner config
fromMinerEnv := func() (*harmonydb.DB, error) {
v := os.Getenv("LOTUS_MINER_PATH")
if v == "" {
return nil, errors.New("no miner env")
}
return readToml(filepath.Join(v, "config.toml"))
}
fromMiner := func() (*harmonydb.DB, error) {
u, err := os.UserHomeDir()
if err != nil {
return nil, err
}
return readToml(filepath.Join(u, ".lotusminer/config.toml"))
}
fromEnv := func() (*harmonydb.DB, error) {
// #3 Try env
u, err := url.Parse(os.Getenv("CURIO_DB"))
if err != nil {
return nil, errors.New("no db connection string found in CURIO_DB env")
}
cfg := config.DefaultStorageMiner().HarmonyDB
if u.User.Username() != "" {
cfg.Username = u.User.Username()
}
if p, ok := u.User.Password(); ok && p != "" {
cfg.Password = p
}
if u.Hostname() != "" {
cfg.Hosts = []string{u.Hostname()}
}
if u.Port() != "" {
cfg.Port = u.Port()
}
if strings.TrimPrefix(u.Path, "/") != "" {
cfg.Database = strings.TrimPrefix(u.Path, "/")
}
return harmonydb.NewFromConfig(cfg)
}
for _, f := range []func() (*harmonydb.DB, error){fromCLI, fromMinerEnv, fromMiner, fromEnv} {
db, err := f()
if err != nil {
continue
}
return db, nil
}
log.Error("No db connection string found. User CLI args or env var: set CURIO_DB=postgres://USER:PASSWORD@HOST:PORT/DATABASE")
return fromCLI() //in-case it's not about bad config.
}
type JwtPayload struct {
Allow []auth.Permission
}
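// StorageAuth signs an admin JWT with the given base64-encoded secret and
// returns it as an Authorization header for the remote storage endpoints.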
func StorageAuth(apiKey string) (sealer.StorageAuth, error) {
if apiKey == "" {
return nil, xerrors.Errorf("no api key provided")
}
rawKey, err := base64.StdEncoding.DecodeString(apiKey)
if err != nil {
return nil, xerrors.Errorf("decoding api key: %w", err)
}
key := jwt.NewHS256(rawKey)
p := JwtPayload{
Allow: []auth.Permission{"admin"},
}
token, err := jwt.Sign(&p, key)
if err != nil {
return nil, err
}
headers := http.Header{}
headers.Add("Authorization", "Bearer "+string(token))
return sealer.StorageAuth(headers), nil
}
func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) {
var deps Deps
return &deps, deps.PopulateRemainingDeps(ctx, cctx, true)
}
type Deps struct {
Layers []string
Cfg *config.CurioConfig // values
DB *harmonydb.DB // has itest capability
Full api.FullNode
Verif storiface.Verifier
As *multictladdr.MultiAddressSelector
Maddrs map[dtypes.MinerAddress]bool
ProofTypes map[abi.RegisteredSealProof]bool
Stor *paths.Remote
Si *paths.DBIndex
LocalStore *paths.Local
LocalPaths *paths.BasicLocalStorage
ListenAddr string
}
const (
FlagRepoPath = "repo-path"
)
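// PopulateRemainingDeps fills in every dependency that has not been set yet:
// repo, DB, config layers, verifier, address selector, sector index, full node
// client, local/remote stores, miner addresses and proof types.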
func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, makeRepo bool) error {
var err error
if makeRepo {
// Open repo
repoPath := cctx.String(FlagRepoPath)
fmt.Println("repopath", repoPath)
r, err := repo.NewFS(repoPath)
if err != nil {
return err
}
ok, err := r.Exists()
if err != nil {
return err
}
if !ok {
if err := r.Init(repo.Curio); err != nil {
return err
}
}
}
if deps.DB == nil {
deps.DB, err = MakeDB(cctx)
if err != nil {
return err
}
}
if deps.Layers == nil {
deps.Layers = append([]string{"base"}, cctx.StringSlice("layers")...) // Always stack on top of "base" layer
}
if deps.Cfg == nil {
// The config feeds into task runners & their helpers
deps.Cfg, err = GetConfig(cctx.Context, cctx.StringSlice("layers"), deps.DB)
if err != nil {
return xerrors.Errorf("populate config: %w", err)
}
}
log.Debugw("config", "config", deps.Cfg)
if deps.Verif == nil {
deps.Verif = ffiwrapper.ProofVerifier
}
if deps.As == nil {
deps.As, err = curio.AddressSelector(deps.Cfg.Addresses)()
if err != nil {
return err
}
}
if deps.Si == nil {
de, err := journal.ParseDisabledEvents(deps.Cfg.Journal.DisabledEvents)
if err != nil {
return err
}
j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de)
if err != nil {
return err
}
go func() {
<-ctx.Done()
_ = j.Close()
}()
al := alerting.NewAlertingSystem(j)
deps.Si = paths.NewDBIndex(al, deps.DB)
}
if deps.Full == nil {
var fullCloser func()
cfgApiInfo := deps.Cfg.Apis.ChainApiInfo
if v := os.Getenv("FULLNODE_API_INFO"); v != "" {
cfgApiInfo = []string{v}
}
deps.Full, fullCloser, err = getFullNodeAPIV1Curio(cctx, cfgApiInfo)
if err != nil {
return err
}
go func() {
<-ctx.Done()
fullCloser()
}()
}
deps.LocalPaths = &paths.BasicLocalStorage{
PathToJSON: cctx.String("storage-json"),
}
if deps.ListenAddr == "" {
listenAddr := cctx.String("listen")
const unspecifiedAddress = "0.0.0.0"
addressSlice := strings.Split(listenAddr, ":")
if ip := net.ParseIP(addressSlice[0]); ip != nil {
if ip.String() == unspecifiedAddress {
rip, err := deps.DB.GetRoutableIP()
if err != nil {
return err
}
deps.ListenAddr = rip + ":" + addressSlice[1]
}
}
}
if cctx.IsSet("gui-listen") {
deps.Cfg.Subsystems.GuiAddress = cctx.String("gui-listen")
}
if deps.LocalStore == nil {
deps.LocalStore, err = paths.NewLocal(ctx, deps.LocalPaths, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"})
if err != nil {
return err
}
}
sa, err := StorageAuth(deps.Cfg.Apis.StorageRPCSecret)
if err != nil {
return xerrors.Errorf(`'%w' while parsing the config toml's
[Apis]
StorageRPCSecret=%v
Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, deps.Cfg.Apis.StorageRPCSecret)
}
if deps.Stor == nil {
deps.Stor = paths.NewRemote(deps.LocalStore, deps.Si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{})
}
if deps.Maddrs == nil {
deps.Maddrs = map[dtypes.MinerAddress]bool{}
}
if len(deps.Maddrs) == 0 {
for _, s := range deps.Cfg.Addresses {
for _, s := range s.MinerAddresses {
addr, err := address.NewFromString(s)
if err != nil {
return err
}
deps.Maddrs[dtypes.MinerAddress(addr)] = true
}
}
}
if deps.ProofTypes == nil {
deps.ProofTypes = map[abi.RegisteredSealProof]bool{}
}
if len(deps.ProofTypes) == 0 {
for maddr := range deps.Maddrs {
spt, err := modules.SealProofType(maddr, deps.Full)
if err != nil {
return err
}
deps.ProofTypes[spt] = true
}
}
return nil
}
func LoadConfigWithUpgrades(text string, curioConfigWithDefaults *config.CurioConfig) (toml.MetaData, error) {
// allow migration from old config format that was limited to 1 wallet setup.
newText := strings.Join(lo.Map(strings.Split(text, "\n"), func(line string, _ int) string {
if strings.EqualFold(line, "[addresses]") {
return "[[addresses]]"
}
return line
}), "\n")
meta, err := toml.Decode(newText, &curioConfigWithDefaults)
for i := range curioConfigWithDefaults.Addresses {
if curioConfigWithDefaults.Addresses[i].PreCommitControl == nil {
curioConfigWithDefaults.Addresses[i].PreCommitControl = []string{}
}
if curioConfigWithDefaults.Addresses[i].CommitControl == nil {
curioConfigWithDefaults.Addresses[i].CommitControl = []string{}
}
if curioConfigWithDefaults.Addresses[i].TerminateControl == nil {
curioConfigWithDefaults.Addresses[i].TerminateControl = []string{}
}
}
return meta, err
}
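// GetConfig loads the requested config layers from harmony_config, always
// stacking them on top of the 'base' layer, and returns the merged CurioConfig.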
func GetConfig(ctx context.Context, layers []string, db *harmonydb.DB) (*config.CurioConfig, error) {
curioConfig := config.DefaultCurioConfig()
have := []string{}
layers = append([]string{"base"}, layers...) // Always stack on top of "base" layer
for _, layer := range layers {
text := ""
err := db.QueryRow(ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
if err != nil {
if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
return nil, fmt.Errorf("missing layer '%s' ", layer)
}
if layer == "base" {
return nil, errors.New(`curio defaults to a layer named 'base'.
Either use 'migrate' command or edit a base.toml and upload it with: curio config set base.toml`)
}
return nil, fmt.Errorf("could not read layer '%s': %w", layer, err)
}
meta, err := LoadConfigWithUpgrades(text, curioConfig)
if err != nil {
return curioConfig, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err)
}
for _, k := range meta.Keys() {
have = append(have, strings.Join(k, " "))
}
log.Debugw("Using layer", "layer", layer, "config", curioConfig)
}
_ = have // FUTURE: verify that required fields are here.
// If config includes 3rd-party config, consider JSONSchema as a way that
// 3rd-parties can dynamically include config requirements and we can
// validate the config. Because of layering, we must validate @ startup.
return curioConfig, nil
}
func GetDefaultConfig(comment bool) (string, error) {
c := config.DefaultCurioConfig()
cb, err := config.ConfigUpdate(c, nil, config.Commented(comment), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return "", err
}
return string(cb), nil
}
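// GetDepsCLI builds the minimal dependency set (config, DB, full node) needed
// by CLI commands that do not run tasks.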
func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) {
db, err := MakeDB(cctx)
if err != nil {
return nil, err
}
layers := cctx.StringSlice("layers")
cfg, err := GetConfig(cctx.Context, layers, db)
if err != nil {
return nil, err
}
full, fullCloser, err := getFullNodeAPIV1Curio(cctx, cfg.Apis.ChainApiInfo)
if err != nil {
return nil, err
}
go func() {
<-ctx.Done()
fullCloser()
}()
return &Deps{
Cfg: cfg,
DB: db,
Full: full,
}, nil
}
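// CreateMinerConfig adds the given miner addresses to the 'base' config layer,
// creating it (with a fresh StorageRPCSecret and the provided chain API info)
// when no base layer exists yet.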
func CreateMinerConfig(ctx context.Context, full v1api.FullNode, db *harmonydb.DB, miners []string, info string) error {
var titles []string
err := db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
if err != nil {
return fmt.Errorf("cannot reach the db. Ensure that Yugabyte flags are set correctly to"+
" reach Yugabyte: %s", err.Error())
}
// setup config
curioConfig := config.DefaultCurioConfig()
for _, addr := range miners {
maddr, err := address.NewFromString(addr)
if err != nil {
return xerrors.Errorf("Invalid address: %s", addr)
}
_, err = full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("Failed to get miner info: %w", err)
}
curioConfig.Addresses = append(curioConfig.Addresses, config.CurioAddresses{
PreCommitControl: []string{},
CommitControl: []string{},
TerminateControl: []string{},
DisableOwnerFallback: false,
DisableWorkerFallback: false,
MinerAddresses: []string{addr},
})
}
{
sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32))
if err != nil {
return err
}
curioConfig.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk)
}
{
curioConfig.Apis.ChainApiInfo = append(curioConfig.Apis.ChainApiInfo, info)
}
curioConfig.Addresses = lo.Filter(curioConfig.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
// If no base layer is present
if !lo.Contains(titles, "base") {
cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return xerrors.Errorf("Failed to generate default config: %w", err)
}
cfg := string(cb)
_, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg)
if err != nil {
return xerrors.Errorf("failed to insert the 'base' into the database: %w", err)
}
fmt.Printf("The base layer has been updated with miner[s] %s\n", miners)
return nil
}
// if base layer is present
baseCfg := config.DefaultCurioConfig()
var baseText string
err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
if err != nil {
return xerrors.Errorf("Cannot load base config from database: %w", err)
}
_, err = LoadConfigWithUpgrades(baseText, baseCfg)
if err != nil {
return xerrors.Errorf("Cannot parse base config: %w", err)
}
baseCfg.Addresses = append(baseCfg.Addresses, curioConfig.Addresses...)
baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool {
return len(a.MinerAddresses) > 0
})
cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv())
if err != nil {
return xerrors.Errorf("cannot interpret config: %w", err)
}
_, err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb))
if err != nil {
return xerrors.Errorf("cannot update base config: %w", err)
}
fmt.Printf("The base layer has been updated with miner[s] %s\n", miners)
return nil
}
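// Illustrative sketch (added; not original code): add a hypothetical miner
// t01000 to the cluster config; apiInfo is assumed to be a valid
// FULLNODE_API_INFO-style string obtained elsewhere.
func exampleAddMiner(ctx context.Context, full v1api.FullNode, db *harmonydb.DB, apiInfo string) error {
	return CreateMinerConfig(ctx, full, db, []string{"t01000"}, apiInfo)
}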

View File

@ -1,5 +0,0 @@
DOCKER_USER=curio
LOTUS_IMAGE=${DOCKER_USER}/lotus-dev:dev
LOTUS_MINER_IMAGE=${DOCKER_USER}/lotus-miner-dev:dev
CURIO_IMAGE=${DOCKER_USER}/curio-dev:dev
FIL_PROOFS_PARAMETER_CACHE=${HOME}/.cache/filecoin-proof-parameters

View File

@ -1 +0,0 @@
data

View File

@ -1,30 +0,0 @@
ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest
#############################################################################
FROM ${CURIO_TEST_IMAGE}
ARG BUILD_VERSION=0.1
LABEL org.opencontainers.image.version=$BUILD_VERSION \
org.opencontainers.image.authors="Curio Dev Team" \
name="lotus-dev" \
maintainer="Curio Dev Team" \
vendor="Curio Dev Team" \
version=$BUILD_VERSION \
release=$BUILD_VERSION \
summary="This image is used to host the curio dev service" \
description="This image is used to host the curio dev service"
EXPOSE 12300 4701 32100
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/genesis
VOLUME /var/lib/builtin-actors
WORKDIR /app
RUN mkdir -p /app
COPY entrypoint.sh /app
USER root
ENTRYPOINT ["./entrypoint.sh"]

View File

@ -1,60 +0,0 @@
#!/usr/bin/env bash
set -e
echo CURIO_REPO_PATH=$CURIO_REPO_PATH
echo Waiting for lotus to be ready ...
lotus wait-api
echo Waiting for lotus-miner to be ready ...
lotus-miner wait-api
head=0
# Loop until the head is greater than 9
while [[ $head -le 9 ]]; do
head=$(lotus chain list | awk '{print $1}' | awk -F':' '{print $1}' | tail -1)
if [[ $head -le 9 ]]; then
echo "Current head: $head, which is not greater than 9. Waiting..."
sleep 1 # Wait for 1 second before checking again
else
echo "The head is now greater than 9: $head"
fi
done
echo "All ready. Let's go"
myip=`nslookup curio | grep -v "#" | grep Address | awk '{print $2}'`
if [ ! -f $CURIO_REPO_PATH/.init.curio ]; then
if [ ! -f $CURIO_REPO_PATH/.init.setup ]; then
export DEFAULT_WALLET=`lotus wallet default`
echo Create a new miner actor ...
lotus-shed miner create $DEFAULT_WALLET $DEFAULT_WALLET $DEFAULT_WALLET 8MiB
touch $CURIO_REPO_PATH/.init.setup
fi
if [ ! -f $CURIO_REPO_PATH/.init.config ]; then
newminer=`lotus state list-miners | grep -v t01000`
echo "New Miner is $newminer"
echo Initiating a new Curio cluster ...
curio config new-cluster $newminer
echo Enabling market ...
curio config get seal | sed -e $'$a\\\n BoostAdapters = ["'"$newminer"':'"$myip"':32100"]\n EnableParkPiece = true' | curio config set --title seal
touch $CURIO_REPO_PATH/.init.config
fi
echo Starting Curio node to attach storage ...
curio run --nosync --layers seal,post,gui &
CURIO_PID=`echo $!`
until curio cli --machine $myip:12300 wait-api; do
echo "Waiting for the curio CLI to become ready..."
sleep 5
done
curio cli --machine $myip:12300 storage attach --init --seal --store $CURIO_REPO_PATH
touch $CURIO_REPO_PATH/.init.curio
echo Stopping Curio node ...
echo Trying to stop the curio process ...
kill -15 $CURIO_PID || kill -9 $CURIO_PID
echo Done
fi
echo Starting curio node ...
exec curio run --nosync --layers seal,post,gui

View File

@ -1,101 +0,0 @@
version: '3.8'
name: curio-devnet
x-logging:
&default-logging
options:
max-size: '20m'
max-file: '3'
driver: json-file
networks:
curio-net:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
services:
lotus:
container_name: lotus
image: ${LOTUS_IMAGE}
init: true
ports:
- "1234:1234"
- "9090:9090"
environment:
- LOTUS_FEVM_ENABLEETHRPC=true
- LOTUS_API_LISTENADDRESS=/dns/lotus/tcp/1234/http
- LOTUS_LIBP2P_LISTENADDRESSES=/ip4/0.0.0.0/tcp/9090
restart: unless-stopped
logging: *default-logging
volumes:
- ./data/lotus:/var/lib/lotus:rw
- ./data/genesis:/var/lib/genesis:rw
- ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw
networks:
curio-net:
ipv4_address: 172.20.0.2
lotus-miner:
container_name: lotus-miner
image: ${LOTUS_MINER_IMAGE}
init: true
ports:
- "2345:2345"
environment:
- LOTUS_API_LISTENADDRESS=/dns/lotus-miner/tcp/2345/http
- LOTUS_API_REMOTELISTENADDRESS=lotus-miner:2345
- LOTUS_SEALING_BATCHPRECOMMITS=false
- LOTUS_SEALING_AGGREGATECOMMITS=false
- LOTUS_SUBSYSTEMS_ENABLEMARKETS=false
- LOTUS_SEALING_WAITDEALSDELAY=20s
restart: unless-stopped
logging: *default-logging
volumes:
- ./data/lotus-miner:/var/lib/lotus-miner:rw
- ./data/lotus:/var/lib/lotus:ro
- ./data/genesis:/var/lib/genesis:ro
- ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw
networks:
curio-net:
ipv4_address: 172.20.0.3
curio:
container_name: curio
image: ${CURIO_IMAGE}
init: true
ports:
- "12300:12300" # API
- "4701:4701" # UI
- "32100:32100" # Market
environment:
- CURIO_REPO_PATH=/var/lib/curio
- CURIO_HARMONYDB_HOSTS=yugabyte
restart: unless-stopped
logging: *default-logging
volumes:
- ./data/curio:/var/lib/curio:rw
- ./data/lotus:/var/lib/lotus:ro
- ./data/lotus-miner:/var/lib/lotus-miner:ro
- ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw
networks:
curio-net:
ipv4_address: 172.20.0.4
yugabyte:
container_name: yugabyte
image: curio/yugabyte-dev:dev
init: true
ports:
- "5433:5433"
- "9000:9000"
- "9042:9042"
restart: unless-stopped
logging: *default-logging
volumes:
- ./data/yugabyte-data:/root/var/data
- ./data/yugabyte-logs:/root/var/logs
networks:
curio-net:
ipv4_address: 172.20.0.5
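# Usage note (added; not part of the original file): with the image names and
# FIL_PROOFS_PARAMETER_CACHE supplied by the neighbouring .env file, the devnet
# is typically brought up with `docker compose up -d` and torn down with
# `docker compose down`.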

View File

@ -1,33 +0,0 @@
ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest
#############################################################################
FROM ${CURIO_TEST_IMAGE}
ARG BUILD_VERSION=0.1
LABEL org.opencontainers.image.version=$BUILD_VERSION \
org.opencontainers.image.authors="Curio Dev Team" \
name="lotus-miner-dev" \
maintainer="Curio Dev Team" \
vendor="Curio Dev Team" \
version=$BUILD_VERSION \
release=$BUILD_VERSION \
summary="This image is used to host the lotus-miner dev service" \
description="This image is used to host the lotus-miner dev service"
EXPOSE 2345
ENV LOTUS_SKIP_GENESIS_CHECK=_yes_
ENV GENESIS_PATH=/var/lib/genesis
ENV SECTOR_SIZE=8388608
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/genesis
VOLUME /var/lib/builtin-actors
WORKDIR /app
RUN mkdir -p /app
COPY entrypoint.sh /app
USER root
ENTRYPOINT ["./entrypoint.sh"]

View File

@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -e
echo Waiting for lotus to be ready ...
lotus wait-api
echo "Lotus is ready. Let's go"
if [ ! -f $LOTUS_MINER_PATH/.init.miner ]; then
echo Import the genesis miner key ...
lotus wallet import --as-default $GENESIS_PATH/pre-seal-t01000.key
echo Set up the genesis miner ...
lotus-miner init --genesis-miner --actor=t01000 --sector-size=$SECTOR_SIZE --pre-sealed-sectors=$GENESIS_PATH --pre-sealed-metadata=$GENESIS_PATH/pre-seal-t01000.json --nosync
touch $LOTUS_MINER_PATH/.init.miner
echo Done
fi
echo Starting lotus miner ...
exec lotus-miner run --nosync

View File

@ -1,35 +0,0 @@
ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest
#############################################################################
FROM ${CURIO_TEST_IMAGE}
ARG BUILD_VERSION=0.1
LABEL org.opencontainers.image.version=$BUILD_VERSION \
org.opencontainers.image.authors="Curio Dev Team" \
name="lotus-dev" \
maintainer="Curio Dev Team" \
vendor="Curio Dev Team" \
version=$BUILD_VERSION \
release=$BUILD_VERSION \
summary="This image is used to host the lotus dev service" \
description="This image is used to host the lotus dev service"
EXPOSE 1234
EXPOSE 9090
ENV LOTUS_SKIP_GENESIS_CHECK=_yes_
ENV GENESIS_PATH=/var/lib/genesis
ENV SECTOR_SIZE=8388608
ENV LOTUS_FEVM_ENABLEETHRPC=true
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/genesis
VOLUME /var/lib/builtin-actors
WORKDIR /app
RUN mkdir -p /app
COPY entrypoint.sh /app
USER root
ENTRYPOINT ["./entrypoint.sh"]

View File

@ -1,33 +0,0 @@
#!/usr/bin/env bash
set -e
if [ ! -f $LOTUS_PATH/.init.params ]; then
echo Fetching proof parameters ...
lotus fetch-params $SECTOR_SIZE
touch $LOTUS_PATH/.init.params
echo Done
fi
if [ ! -f $LOTUS_PATH/.init.genesis ]; then
pushd $LOTUS_PATH
echo Generate root-key-1 for FIL plus
ROOT_KEY_1=`lotus-shed keyinfo new bls`
echo $ROOT_KEY_1 > rootkey-1
echo Generate root-key-2 for FIL plus
ROOT_KEY_2=`lotus-shed keyinfo new bls`
echo $ROOT_KEY_2 > rootkey-2
popd
echo Initializing pre seal ...
lotus-seed --sector-dir $GENESIS_PATH pre-seal --sector-size $SECTOR_SIZE --num-sectors 1
echo Initializing genesis ...
lotus-seed --sector-dir $GENESIS_PATH genesis new $LOTUS_PATH/localnet.json
echo Setting signers ...
lotus-seed --sector-dir $GENESIS_PATH genesis set-signers --threshold=2 --signers $ROOT_KEY_1 --signers $ROOT_KEY_2 $LOTUS_PATH/localnet.json
echo Initializing address ...
lotus-seed --sector-dir $GENESIS_PATH genesis add-miner $LOTUS_PATH/localnet.json $GENESIS_PATH/pre-seal-t01000.json
touch $LOTUS_PATH/.init.genesis
echo Done
fi
echo Starting lotus daemon ...
exec lotus daemon --lotus-make-genesis=$LOTUS_PATH/devgen.car --genesis-template=$LOTUS_PATH/localnet.json --bootstrap=false

View File

@ -1,12 +0,0 @@
FROM centos:centos8
RUN cd /etc/yum.repos.d/
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
RUN yum upgrade -y
RUN yum install procps-ng wget libatomic python39 -y
RUN alternatives --set python /usr/bin/python3
RUN arch=$(arch | sed s/aarch64/el8-aarch64/ | sed s/x86_64/linux-x86_64/) && wget "https://downloads.yugabyte.com/releases/2.20.2.0/yugabyte-2.20.2.0-b145-${arch}.tar.gz" -O /tmp/yugabyte.tar.gz
RUN tar xvfz /tmp/yugabyte.tar.gz
RUN ln -s /yugabyte-2.20.2.0 /yugabyte
RUN /yugabyte/bin/post_install.sh
CMD /yugabyte/bin/yugabyted start --daemon=false --ui=false
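# Note (added; not part of the original file): yugabyted serves YSQL on 5433,
# so assuming the default yugabyte/empty-password credentials and the port
# mapping from docker-compose.yaml, one can connect from the host with e.g.
#   psql -h 127.0.0.1 -p 5433 -U yugabyte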

View File

@ -1,76 +0,0 @@
package ffi
import (
"context"
"io"
"os"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, pieceID storiface.PieceNumber, size int64, data io.Reader) error {
// todo: config(?): allow setting PathStorage for this
// todo storage reservations
paths, _, done, err := sb.sectors.AcquireSector(ctx, taskID, pieceID.Ref(), storiface.FTNone, storiface.FTPiece, storiface.PathSealing)
if err != nil {
return err
}
defer done()
dest := paths.Piece
tempDest := dest + ".tmp"
destFile, err := os.OpenFile(tempDest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return xerrors.Errorf("creating temp piece file '%s': %w", tempDest, err)
}
removeTemp := true
defer func() {
if removeTemp {
rerr := os.Remove(tempDest)
if rerr != nil {
log.Errorf("removing temp file: %+v", rerr)
}
}
}()
copyStart := time.Now()
n, err := io.CopyBuffer(destFile, io.LimitReader(data, size), make([]byte, 8<<20))
if err != nil {
_ = destFile.Close()
return xerrors.Errorf("copying piece data: %w", err)
}
if err := destFile.Close(); err != nil {
return xerrors.Errorf("closing temp piece file: %w", err)
}
if n != size {
return xerrors.Errorf("short write: %d", n)
}
copyEnd := time.Now()
log.Infow("wrote parked piece", "piece", pieceID, "size", size, "duration", copyEnd.Sub(copyStart), "dest", dest, "MiB/s", float64(size)/(1<<20)/copyEnd.Sub(copyStart).Seconds())
if err := os.Rename(tempDest, dest); err != nil {
return xerrors.Errorf("rename temp piece to dest %s -> %s: %w", tempDest, dest, err)
}
removeTemp = false
return nil
}
func (sb *SealCalls) PieceReader(ctx context.Context, id storiface.PieceNumber) (io.ReadCloser, error) {
return sb.sectors.storage.ReaderSeq(ctx, id.Ref(), storiface.FTPiece)
}
func (sb *SealCalls) RemovePiece(ctx context.Context, id storiface.PieceNumber) error {
return sb.sectors.storage.Remove(ctx, id.Ref().ID, storiface.FTPiece, true, nil)
}
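// Illustrative sketch (added; not original code): write a parked piece and
// stream it back; pieceID, size and data are assumed to come from the
// park-piece task. A nil task ID simply skips the storage-reservation lookup.
func examplePieceRoundTrip(ctx context.Context, sb *SealCalls, pieceID storiface.PieceNumber, size int64, data io.Reader) error {
	if err := sb.WritePiece(ctx, nil, pieceID, size, data); err != nil {
		return err
	}
	r, err := sb.PieceReader(ctx, pieceID)
	if err != nil {
		return err
	}
	defer r.Close() //nolint:errcheck
	_, err = io.Copy(io.Discard, r)
	return err
}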

View File

@ -1,662 +0,0 @@
package ffi
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"github.com/KarpelesLab/reflink"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/puzpuzpuz/xsync/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/ffiselect"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
// TODO everywhere here that we call this we should call our proxy instead.
ffi "github.com/filecoin-project/filecoin-ffi"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
proof2 "github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/curiosrc/proof"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/proofpaths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("cu/ffi")
/*
type ExternPrecommit2 func(ctx context.Context, sector storiface.SectorRef, cache, sealed string, pc1out storiface.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error)
type ExternalSealer struct {
PreCommit2 ExternPrecommit2
}
*/
type SealCalls struct {
sectors *storageProvider
/*// externCalls cointain overrides for calling alternative sealing logic
externCalls ExternalSealer*/
}
func NewSealCalls(st *paths.Remote, ls *paths.Local, si paths.SectorIndex) *SealCalls {
return &SealCalls{
sectors: &storageProvider{
storage: st,
localStore: ls,
sindex: si,
storageReservations: xsync.NewIntegerMapOf[harmonytask.TaskID, *StorageReservation](),
},
}
}
type storageProvider struct {
storage *paths.Remote
localStore *paths.Local
sindex paths.SectorIndex
storageReservations *xsync.MapOf[harmonytask.TaskID, *StorageReservation]
}
func (l *storageProvider) AcquireSector(ctx context.Context, taskID *harmonytask.TaskID, sector storiface.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType) (fspaths, ids storiface.SectorPaths, release func(), err error) {
var sectorPaths, storageIDs storiface.SectorPaths
var releaseStorage func()
var ok bool
var resv *StorageReservation
if taskID != nil {
resv, ok = l.storageReservations.Load(*taskID)
}
if ok && resv != nil {
if resv.Alloc != allocate || resv.Existing != existing {
// this should never happen, only when task definition is wrong
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("storage reservation type mismatch")
}
log.Debugw("using existing storage reservation", "task", taskID, "sector", sector, "existing", existing, "allocate", allocate)
sectorPaths = resv.Paths
storageIDs = resv.PathIDs
releaseStorage = resv.Release
if len(existing.AllSet()) > 0 {
// there are some "existing" files in the reservation. Some of them may need fetching, so call l.storage.AcquireSector
// (which unlike in the reservation code will be called on the paths.Remote instance) to ensure that the files are
// present locally. Note that we do not care about 'allocate' requests; those files don't exist yet, and are just
// proposed paths with a reservation of space.
_, checkPathIDs, err := l.storage.AcquireSector(ctx, sector, existing, storiface.FTNone, sealing, storiface.AcquireMove, storiface.AcquireInto(storiface.PathsWithIDs{Paths: sectorPaths, IDs: storageIDs}))
if err != nil {
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: %w", err)
}
// assert that checkPathIDs is the same as storageIDs
if storageIDs.Subset(existing) != checkPathIDs.Subset(existing) {
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: pathIDs mismatch %#v != %#v", storageIDs, checkPathIDs)
}
}
} else {
// No related reservation, acquire storage as usual
var err error
sectorPaths, storageIDs, err = l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove)
if err != nil {
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, err
}
releaseStorage, err = l.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal, paths.MinFreeStoragePercentage)
if err != nil {
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
}
}
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, sectorPaths)
return sectorPaths, storageIDs, func() {
releaseStorage()
for _, fileType := range storiface.PathTypes {
if fileType&allocate == 0 {
continue
}
sid := storiface.PathByType(storageIDs, fileType)
if err := l.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, true); err != nil {
log.Errorf("declare sector error: %+v", err)
}
}
}, nil
}
func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error {
paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, storiface.FTCache, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()
// prepare SDR params
commp, err := commcid.CIDToDataCommitmentV1(commKcid)
if err != nil {
return xerrors.Errorf("computing commK: %w", err)
}
replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp)
if err != nil {
return xerrors.Errorf("computing replica id: %w", err)
}
// make sure the cache dir is empty
if err := os.RemoveAll(paths.Cache); err != nil {
return xerrors.Errorf("removing cache dir: %w", err)
}
if err := os.MkdirAll(paths.Cache, 0755); err != nil {
return xerrors.Errorf("mkdir cache dir: %w", err)
}
// generate new sector key
err = ffi.GenerateSDR(
sector.ProofType,
paths.Cache,
replicaID,
)
if err != nil {
return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
}
if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil {
return xerrors.Errorf("ensure one copy: %w", err)
}
return nil
}
// ensureOneCopy makes sure that there is only one version of sector data.
// Usually called after an operation has completed successfully on sector data.
func (sb *SealCalls) ensureOneCopy(ctx context.Context, sid abi.SectorID, pathIDs storiface.SectorPaths, fts storiface.SectorFileType) error {
if !pathIDs.HasAllSet(fts) {
return xerrors.Errorf("ensure one copy: not all paths are set")
}
for _, fileType := range fts.AllSet() {
pid := storiface.PathByType(pathIDs, fileType)
keepIn := []storiface.ID{storiface.ID(pid)}
log.Debugw("ensureOneCopy", "sector", sid, "type", fileType, "keep", keepIn)
if err := sb.sectors.storage.Remove(ctx, sid, fileType, true, keepIn); err != nil {
return err
}
}
return nil
}
func (sb *SealCalls) TreeRC(ctx context.Context, task *harmonytask.TaskID, sector storiface.SectorRef, unsealed cid.Cid) (scid cid.Cid, ucid cid.Cid, err error) {
p1o, err := sb.makePhase1Out(unsealed, sector.ProofType)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("make phase1 output: %w", err)
}
fspaths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, task, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()
defer func() {
if err != nil {
clerr := removeDRCTrees(fspaths.Cache, false)
if clerr != nil {
log.Errorw("removing tree files after TreeDRC error", "error", clerr, "exec-error", err, "sector", sector, "cache", fspaths.Cache)
}
}
}()
// create sector-sized file at paths.Sealed; PC2 transforms it into a sealed sector in-place
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting sector size: %w", err)
}
{
// copy TreeD prefix to sealed sector, SealPreCommitPhase2 will mutate it in place into the sealed sector
// first try reflink + truncate, that should be way faster
err := reflink.Always(filepath.Join(fspaths.Cache, proofpaths.TreeDName), fspaths.Sealed)
if err == nil {
err = os.Truncate(fspaths.Sealed, int64(ssize))
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("truncating reflinked sealed file: %w", err)
}
} else {
log.Errorw("reflink treed -> sealed failed, falling back to slow copy, use single scratch btrfs or xfs filesystem", "error", err, "sector", sector, "cache", fspaths.Cache, "sealed", fspaths.Sealed)
// fallback to slow copy, copy ssize bytes from treed to sealed
dst, err := os.OpenFile(fspaths.Sealed, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("opening sealed sector file: %w", err)
}
src, err := os.Open(filepath.Join(fspaths.Cache, proofpaths.TreeDName))
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("opening treed sector file: %w", err)
}
_, err = io.CopyN(dst, src, int64(ssize))
derr := dst.Close()
_ = src.Close()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("copying treed -> sealed: %w", err)
}
if derr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("closing sealed file: %w", derr)
}
}
}
sl, uns, err := ffiselect.FFISelect{}.SealPreCommitPhase2(sector.ID, p1o, fspaths.Cache, fspaths.Sealed)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("computing seal proof: %w", err)
}
if uns != unsealed {
return cid.Undef, cid.Undef, xerrors.Errorf("unsealed cid changed after sealing")
}
if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|storiface.FTSealed); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("ensure one copy: %w", err)
}
return sl, uns, nil
}
func removeDRCTrees(cache string, isDTree bool) error {
files, err := os.ReadDir(cache)
if err != nil {
return xerrors.Errorf("listing cache: %w", err)
}
var testFunc func(string) bool
if isDTree {
testFunc = proofpaths.IsTreeDFile
} else {
testFunc = proofpaths.IsTreeRCFile
}
for _, file := range files {
if testFunc(file.Name()) {
err := os.Remove(filepath.Join(cache, file.Name()))
if err != nil {
return xerrors.Errorf("removing tree file: %w", err)
}
}
}
return nil
}
func (sb *SealCalls) GenerateSynthPoRep() {
panic("todo")
}
func (sb *SealCalls) PoRepSnark(ctx context.Context, sn storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) {
vproof, err := sb.sectors.storage.GeneratePoRepVanillaProof(ctx, sn, sealed, unsealed, ticket, seed)
if err != nil {
return nil, xerrors.Errorf("failed to generate vanilla proof: %w", err)
}
proof, err := ffiselect.FFISelect{}.SealCommitPhase2(vproof, sn.ID.Number, sn.ID.Miner)
if err != nil {
return nil, xerrors.Errorf("computing seal proof failed: %w", err)
}
ok, err := ffi.VerifySeal(proof2.SealVerifyInfo{
SealProof: sn.ProofType,
SectorID: sn.ID,
DealIDs: nil,
Randomness: ticket,
InteractiveRandomness: seed,
Proof: proof,
SealedCID: sealed,
UnsealedCID: unsealed,
})
if err != nil {
return nil, xerrors.Errorf("failed to verify proof: %w", err)
}
if !ok {
return nil, xerrors.Errorf("porep failed to validate")
}
return proof, nil
}
func (sb *SealCalls) makePhase1Out(unsCid cid.Cid, spt abi.RegisteredSealProof) ([]byte, error) {
commd, err := commcid.CIDToDataCommitmentV1(unsCid)
if err != nil {
return nil, xerrors.Errorf("make uns cid: %w", err)
}
type Config struct {
ID string `json:"id"`
Path string `json:"path"`
RowsToDiscard int `json:"rows_to_discard"`
Size int `json:"size"`
}
type Labels struct {
H *string `json:"_h"` // proofs want this..
Labels []Config `json:"labels"`
}
var phase1Output struct {
CommD [32]byte `json:"comm_d"`
Config Config `json:"config"` // TreeD
Labels map[string]*Labels `json:"labels"`
RegisteredProof string `json:"registered_proof"`
}
copy(phase1Output.CommD[:], commd)
phase1Output.Config.ID = "tree-d"
phase1Output.Config.Path = "/placeholder"
phase1Output.Labels = map[string]*Labels{}
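	// Note (added for clarity): for each proof size below, Config.Size appears to
	// be the node count of the binary tree-d built over 32-byte nodes
	// (2*leaves - 1, with leaves = sectorSize/32), and each per-layer label Size
	// equals the leaf count.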
switch spt {
case abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 127
phase1Output.Labels["StackedDrg2KiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg2KiBV1_1"
for i := 0; i < 2; i++ {
phase1Output.Labels["StackedDrg2KiBV1"].Labels = append(phase1Output.Labels["StackedDrg2KiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 64,
})
}
case abi.RegisteredSealProof_StackedDrg8MiBV1_1, abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 524287
phase1Output.Labels["StackedDrg8MiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg8MiBV1_1"
for i := 0; i < 2; i++ {
phase1Output.Labels["StackedDrg8MiBV1"].Labels = append(phase1Output.Labels["StackedDrg8MiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 262144,
})
}
case abi.RegisteredSealProof_StackedDrg512MiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 33554431
phase1Output.Labels["StackedDrg512MiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg512MiBV1_1"
for i := 0; i < 2; i++ {
phase1Output.Labels["StackedDrg512MiBV1"].Labels = append(phase1Output.Labels["StackedDrg512MiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "placeholder",
RowsToDiscard: 0,
Size: 16777216,
})
}
case abi.RegisteredSealProof_StackedDrg32GiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 2147483647
phase1Output.Labels["StackedDrg32GiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg32GiBV1_1"
for i := 0; i < 11; i++ {
phase1Output.Labels["StackedDrg32GiBV1"].Labels = append(phase1Output.Labels["StackedDrg32GiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 1073741824,
})
}
case abi.RegisteredSealProof_StackedDrg64GiBV1_1:
phase1Output.Config.RowsToDiscard = 0
phase1Output.Config.Size = 4294967295
phase1Output.Labels["StackedDrg64GiBV1"] = &Labels{}
phase1Output.RegisteredProof = "StackedDrg64GiBV1_1"
for i := 0; i < 11; i++ {
phase1Output.Labels["StackedDrg64GiBV1"].Labels = append(phase1Output.Labels["StackedDrg64GiBV1"].Labels, Config{
ID: fmt.Sprintf("layer-%d", i+1),
Path: "/placeholder",
RowsToDiscard: 0,
Size: 2147483648,
})
}
default:
panic("proof type not handled")
}
return json.Marshal(phase1Output)
}
func (sb *SealCalls) LocalStorage(ctx context.Context) ([]storiface.StoragePath, error) {
return sb.sectors.localStore.Local(ctx)
}
func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.SectorRef, keepUnsealed bool) error {
alloc := storiface.FTNone
if keepUnsealed {
// note: In Curio we don't write the unsealed file in any of the previous stages, it's only written here from tree-d
alloc = storiface.FTUnsealed
}
sectorPaths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, nil, sector, storiface.FTCache, alloc, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquiring sector paths: %w", err)
}
defer releaseSector()
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return xerrors.Errorf("getting sector size: %w", err)
}
if keepUnsealed {
// tree-d contains exactly unsealed data in the prefix, so
// * we move it to a temp file
// * we truncate the temp file to the sector size
// * we move the temp file to the unsealed location
// temp path in cache where we'll move tree-d before truncating;
// keeping it in the cache directory lets os.Rename do the tree-d -> temp
// move on the same filesystem, while paths.Move below handles the final
// move to unsealed (which may be on a different filesystem)
tempUnsealed := filepath.Join(sectorPaths.Cache, storiface.SectorName(sector.ID))
_, terr := os.Stat(tempUnsealed)
tempUnsealedExists := terr == nil
// First handle an edge case where we have already gone through this step,
// but ClearCache or later steps failed. In that case we'll see tree-d missing and unsealed present
if _, err := os.Stat(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName)); err != nil {
if os.IsNotExist(err) {
// check that unsealed exists and is the right size
st, err := os.Stat(sectorPaths.Unsealed)
if err != nil {
if os.IsNotExist(err) {
if tempUnsealedExists {
// unsealed file does not exist, but temp unsealed file does
// so we can just resume where the previous attempt left off
goto retryUnsealedMove
}
return xerrors.Errorf("neither unsealed file nor temp-unsealed file exists")
}
return xerrors.Errorf("stat unsealed file: %w", err)
}
if st.Size() != int64(ssize) {
if tempUnsealedExists {
// unsealed file exists but is the wrong size, and temp unsealed file exists
// so we can just resume where the previous attempt left off with some cleanup
if err := os.Remove(sectorPaths.Unsealed); err != nil {
return xerrors.Errorf("removing unsealed file from last attempt: %w", err)
}
goto retryUnsealedMove
}
return xerrors.Errorf("unsealed file is not the right size: %d != %d and temp unsealed is missing", st.Size(), ssize)
}
// all good, just log that this edge case happened
log.Warnw("unsealed file exists but tree-d is missing, skipping move", "sector", sector.ID, "unsealed", sectorPaths.Unsealed, "cache", sectorPaths.Cache)
goto afterUnsealedMove
}
return xerrors.Errorf("stat tree-d file: %w", err)
}
// If the state is clean, do the move
// move tree-d to temp file
if err := os.Rename(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName), tempUnsealed); err != nil {
return xerrors.Errorf("moving tree-d to temp file: %w", err)
}
retryUnsealedMove:
// truncate sealed file to sector size
if err := os.Truncate(tempUnsealed, int64(ssize)); err != nil {
return xerrors.Errorf("truncating unsealed file to sector size: %w", err)
}
// move temp file to unsealed location
if err := paths.Move(tempUnsealed, sectorPaths.Unsealed); err != nil {
return xerrors.Errorf("move temp unsealed sector to final location (%s -> %s): %w", tempUnsealed, sectorPaths.Unsealed, err)
}
}
afterUnsealedMove:
if err := ffi.ClearCache(uint64(ssize), sectorPaths.Cache); err != nil {
return xerrors.Errorf("clearing cache: %w", err)
}
if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|alloc); err != nil {
return xerrors.Errorf("ensure one copy: %w", err)
}
return nil
}
func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef, taskID *harmonytask.TaskID) error {
// only move the unsealed file if it still exists and needs moving
moveUnsealed := storiface.FTUnsealed
{
found, unsealedPathType, err := sb.sectorStorageType(ctx, sector, storiface.FTUnsealed)
if err != nil {
return xerrors.Errorf("checking cache storage type: %w", err)
}
if !found || unsealedPathType == storiface.PathStorage {
moveUnsealed = storiface.FTNone
}
}
toMove := storiface.FTCache | storiface.FTSealed | moveUnsealed
var opts []storiface.AcquireOption
if taskID != nil {
resv, ok := sb.sectors.storageReservations.Load(*taskID)
// if the reservation is missing MoveStorage will simply create one internally. This is fine as the reservation
// will only be missing when the node is restarting, which means that the missing reservations will get recreated
// anyways, and before we start claiming other tasks.
if ok {
defer resv.Release()
if resv.Alloc != storiface.FTNone {
return xerrors.Errorf("task %d has storage reservation with alloc", taskID)
}
if resv.Existing != toMove|storiface.FTUnsealed {
return xerrors.Errorf("task %d has storage reservation with different existing", taskID)
}
opts = append(opts, storiface.AcquireInto(storiface.PathsWithIDs{Paths: resv.Paths, IDs: resv.PathIDs}))
}
}
err := sb.sectors.storage.MoveStorage(ctx, sector, toMove, opts...)
if err != nil {
return xerrors.Errorf("moving storage: %w", err)
}
for _, fileType := range toMove.AllSet() {
if err := sb.sectors.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil {
return xerrors.Errorf("rm copies (t:%s, s:%v): %w", fileType, sector, err)
}
}
return nil
}
func (sb *SealCalls) sectorStorageType(ctx context.Context, sector storiface.SectorRef, ft storiface.SectorFileType) (sectorFound bool, ptype storiface.PathType, err error) {
stores, err := sb.sectors.sindex.StorageFindSector(ctx, sector.ID, ft, 0, false)
if err != nil {
return false, "", xerrors.Errorf("finding sector: %w", err)
}
if len(stores) == 0 {
return false, "", nil
}
for _, store := range stores {
if store.CanSeal {
return true, storiface.PathSealing, nil
}
}
return true, storiface.PathStorage, nil
}
// PreFetch fetches the sector file to local storage before SDR and TreeRC Tasks
func (sb *SealCalls) PreFetch(ctx context.Context, sector storiface.SectorRef, task *harmonytask.TaskID) (fsPath, pathID storiface.SectorPaths, releaseSector func(), err error) {
fsPath, pathID, releaseSector, err = sb.sectors.AcquireSector(ctx, task, sector, storiface.FTCache, storiface.FTNone, storiface.PathSealing)
if err != nil {
return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector paths: %w", err)
}
// Don't release the storage locks. They will be released in TreeD func()
return
}
func (sb *SealCalls) TreeD(ctx context.Context, sector storiface.SectorRef, unsealed cid.Cid, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool, fspaths, pathIDs storiface.SectorPaths) error {
var err error
defer func() {
if err != nil {
clerr := removeDRCTrees(fspaths.Cache, true)
if clerr != nil {
log.Errorw("removing tree files after TreeDRC error", "error", clerr, "exec-error", err, "sector", sector, "cache", fspaths.Cache)
}
}
}()
treeDUnsealed, err := proof.BuildTreeD(data, unpaddedData, filepath.Join(fspaths.Cache, proofpaths.TreeDName), size)
if err != nil {
return xerrors.Errorf("building tree-d: %w", err)
}
if treeDUnsealed != unsealed {
return xerrors.Errorf("tree-d cid %s mismatch with supplied unsealed cid %s", treeDUnsealed, unsealed)
}
if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil {
return xerrors.Errorf("ensure one copy: %w", err)
}
return nil
}

View File

@ -1,232 +0,0 @@
package ffi
import (
"context"
"sync"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
storagePaths "github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type SectorRef struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
}
func (sr SectorRef) ID() abi.SectorID {
return abi.SectorID{
Miner: abi.ActorID(sr.SpID),
Number: abi.SectorNumber(sr.SectorNumber),
}
}
func (sr SectorRef) Ref() storiface.SectorRef {
return storiface.SectorRef{
ID: sr.ID(),
ProofType: sr.RegSealProof,
}
}
type TaskStorage struct {
sc *SealCalls
alloc, existing storiface.SectorFileType
ssize abi.SectorSize
pathType storiface.PathType
taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error)
// Minimum free storage percentage cutoff for reservation rejection
MinFreeStoragePercentage float64
}
type ReleaseStorageFunc func() // free storage reservation
type StorageReservation struct {
SectorRef SectorRef
Release ReleaseStorageFunc
Paths storiface.SectorPaths
PathIDs storiface.SectorPaths
Alloc, Existing storiface.SectorFileType
}
func (sb *SealCalls) Storage(taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error), alloc, existing storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType, MinFreeStoragePercentage float64) *TaskStorage {
return &TaskStorage{
sc: sb,
alloc: alloc,
existing: existing,
ssize: ssize,
pathType: pathType,
taskToSectorRef: taskToSectorRef,
MinFreeStoragePercentage: MinFreeStoragePercentage,
}
}
func (t *TaskStorage) HasCapacity() bool {
ctx := context.Background()
paths, err := t.sc.sectors.sindex.StorageBestAlloc(ctx, t.alloc, t.ssize, t.pathType, storagePaths.NoMinerFilter)
if err != nil {
log.Errorf("finding best alloc in HasCapacity: %+v", err)
return false
}
local, err := t.sc.sectors.localStore.Local(ctx)
if err != nil {
log.Errorf("getting local storage: %+v", err)
return false
}
for _, path := range paths {
if t.pathType == storiface.PathStorage && !path.CanStore {
continue // we want to store, and this isn't a store path
}
if t.pathType == storiface.PathSealing && !path.CanSeal {
continue // we want to seal, and this isn't a seal path
}
// check if this path is on this node
var found bool
for _, storagePath := range local {
if storagePath.ID == path.ID {
found = true
break
}
}
if !found {
// this path isn't on this node
continue
}
// StorageBestAlloc already checks that there is enough space; Not atomic like reserving space, but it's
// good enough for HasCapacity
return true
}
return false // no path found
}
func (t *TaskStorage) Claim(taskID int) (func() error, error) {
// TaskStorage Claim Attempts to reserve storage for the task
// A: Create a reservation for files to be allocated
// B: Create a reservation for existing files to be fetched into local storage
// C: Create a reservation for existing files in local storage which may be extended (e.g. sector cache when computing Trees)
ctx := context.Background()
sectorRef, err := t.taskToSectorRef(harmonytask.TaskID(taskID))
if err != nil {
return nil, xerrors.Errorf("getting sector ref: %w", err)
}
// storage writelock sector
lkctx, cancel := context.WithCancel(ctx)
requestedTypes := t.alloc | t.existing
lockAcquireTimeout := time.Second * 10
lockAcquireTimer := time.NewTimer(lockAcquireTimeout)
go func() {
defer cancel()
select {
case <-lockAcquireTimer.C:
case <-ctx.Done():
}
}()
if err := t.sc.sectors.sindex.StorageLock(lkctx, sectorRef.ID(), storiface.FTNone, requestedTypes); err != nil {
// timer will expire
return nil, xerrors.Errorf("claim StorageLock: %w", err)
}
if !lockAcquireTimer.Stop() {
// timer expired, so lkctx is done, and that means the lock was acquired and dropped..
return nil, xerrors.Errorf("failed to acquire lock")
}
defer func() {
// make sure we release the sector lock
lockAcquireTimer.Reset(0)
}()
// First see what we have locally. We are putting allocate and existing together because local acquire will look
// for existing files for allocate requests; separately, existing files which aren't found locally will need to
// be fetched, so we will need to create reservations for that too.
// NOTE localStore.AcquireSector does not open or create any files, nor does it reserve space. It only proposes
// paths to be used.
pathsFs, pathIDs, err := t.sc.sectors.localStore.AcquireSector(ctx, sectorRef.Ref(), storiface.FTNone, requestedTypes, t.pathType, storiface.AcquireMove)
if err != nil {
return nil, err
}
// reserve the space
release, err := t.sc.sectors.localStore.Reserve(ctx, sectorRef.Ref(), requestedTypes, pathIDs, storiface.FSOverheadSeal, t.MinFreeStoragePercentage)
if err != nil {
return nil, err
}
var releaseOnce sync.Once
releaseFunc := func() {
releaseOnce.Do(release)
}
sres := &StorageReservation{
SectorRef: sectorRef,
Release: releaseFunc,
Paths: pathsFs,
PathIDs: pathIDs,
Alloc: t.alloc,
Existing: t.existing,
}
t.sc.sectors.storageReservations.Store(harmonytask.TaskID(taskID), sres)
log.Debugw("claimed storage", "task_id", taskID, "sector", sectorRef.ID(), "paths", pathsFs)
// note: we drop the sector writelock on return; THAT IS INTENTIONAL, this code runs in CanAccept, which doesn't
// guarantee that the work for this sector will happen on this node; SDR CanAccept just ensures that the node can
// run the job, harmonytask is what ensures that only one SDR runs at a time
return func() error {
return t.markComplete(taskID, sectorRef)
}, nil
}
func (t *TaskStorage) markComplete(taskID int, sectorRef SectorRef) error {
// MarkComplete is ALWAYS called after the task is done or not scheduled
// If Claim is called and returns without errors, MarkComplete with the same
// taskID is guaranteed to eventually be called
sres, ok := t.sc.sectors.storageReservations.Load(harmonytask.TaskID(taskID))
if !ok {
return xerrors.Errorf("no reservation found for task %d", taskID)
}
if sectorRef != sres.SectorRef {
return xerrors.Errorf("reservation sector ref doesn't match task sector ref: %+v != %+v", sectorRef, sres.SectorRef)
}
log.Debugw("marking storage complete", "task_id", taskID, "sector", sectorRef.ID(), "paths", sres.Paths)
// remove the reservation
t.sc.sectors.storageReservations.Delete(harmonytask.TaskID(taskID))
// release the reservation
sres.Release()
// note: this only frees the reservation, allocated sectors are declared in AcquireSector which is aware of
// the reservation
return nil
}
var _ resources.Storage = &TaskStorage{}

View File

@ -1,71 +0,0 @@
// This is a wrapper around the FFI functions that allows them to be called by reflection.
// For the Curio GPU selector, see lib/ffiselect/ffiselect.go.
package ffidirect
import (
"github.com/ipfs/go-cid"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
)
// This allows reflection access to the FFI functions.
type FFI struct{}
type ErrorString = string
func untypeError1[R any](r R, err error) (R, string) {
if err == nil {
return r, ""
}
return r, err.Error()
}
func untypeError2[R1, R2 any](r1 R1, r2 R2, err error) (R1, R2, string) {
if err == nil {
return r1, r2, ""
}
return r1, r2, err.Error()
}
func (FFI) GenerateSinglePartitionWindowPoStWithVanilla(
proofType abi.RegisteredPoStProof,
minerID abi.ActorID,
randomness abi.PoStRandomness,
proofs [][]byte,
partitionIndex uint,
) (*ffi.PartitionProof, ErrorString) {
return untypeError1(ffi.GenerateSinglePartitionWindowPoStWithVanilla(proofType, minerID, randomness, proofs, partitionIndex))
}
func (FFI) SealPreCommitPhase2(
phase1Output []byte,
cacheDirPath string,
sealedSectorPath string,
) (sealedCID cid.Cid, unsealedCID cid.Cid, err ErrorString) {
return untypeError2(ffi.SealPreCommitPhase2(phase1Output, cacheDirPath, sealedSectorPath))
}
func (FFI) SealCommitPhase2(
phase1Output []byte,
sectorNum abi.SectorNumber,
minerID abi.ActorID,
) ([]byte, ErrorString) {
return untypeError1(ffi.SealCommitPhase2(phase1Output, sectorNum, minerID))
}
func (FFI) GenerateWinningPoStWithVanilla(
proofType abi.RegisteredPoStProof,
minerID abi.ActorID,
randomness abi.PoStRandomness,
proofs [][]byte,
) ([]proof.PoStProof, ErrorString) {
return untypeError1(ffi.GenerateWinningPoStWithVanilla(proofType, minerID, randomness, proofs))
}
func (FFI) SelfTest(val1 int, val2 cid.Cid) (int, cid.Cid, ErrorString) {
return untypeError2(val1, val2, nil)
}

View File

@ -1,262 +0,0 @@
package ffiselect
import (
"bytes"
"encoding/gob"
"io"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"github.com/ipfs/go-cid"
"github.com/samber/lo"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/curiosrc/build"
"github.com/filecoin-project/lotus/curiosrc/ffiselect/ffidirect"
)
var IsTest = false
var IsCuda = build.IsOpencl != "1"
// Get all devices from ffi
var ch chan string
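// Note (added for clarity): ch acts as a pool of free GPU device ordinals;
// call() takes one ordinal per subprocess and returns it when the subprocess
// exits, so at most one FFI subprocess runs per device at a time.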
func init() {
devices, err := ffi.GetGPUDevices()
if err != nil {
panic(err)
}
ch = make(chan string, len(devices))
for i := 0; i < len(devices); i++ {
ch <- strconv.Itoa(i)
}
}
type ValErr struct {
Val []interface{}
Err string
}
// This is not the one you're looking for.
type FFICall struct {
Fn string
Args []interface{}
}
func subStrInSet(set []string, sub string) bool {
return lo.Reduce(set, func(agg bool, item string, _ int) bool { return agg || strings.Contains(item, sub) }, false)
}
func call(logctx []any, fn string, args ...interface{}) ([]interface{}, error) {
if IsTest {
return callTest(logctx, fn, args...)
}
// get dOrdinal
dOrdinal := <-ch
defer func() {
ch <- dOrdinal
}()
p, err := os.Executable()
if err != nil {
return nil, err
}
commandAry := []string{"ffi"}
cmd := exec.Command(p, commandAry...)
// Set Visible Devices for CUDA and OpenCL
cmd.Env = append(os.Environ(),
func(isCuda bool) string {
if isCuda {
return "CUDA_VISIBLE_DEVICES=" + dOrdinal
}
return "GPU_DEVICE_ORDINAL=" + dOrdinal
}(IsCuda))
tmpDir, err := os.MkdirTemp("", "rust-fil-proofs")
if err != nil {
return nil, err
}
cmd.Env = append(cmd.Env, "TMPDIR="+tmpDir)
if !subStrInSet(cmd.Env, "RUST_LOG") {
cmd.Env = append(cmd.Env, "RUST_LOG=debug")
}
if !subStrInSet(cmd.Env, "FIL_PROOFS_USE_GPU_COLUMN_BUILDER") {
cmd.Env = append(cmd.Env, "FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1")
}
if !subStrInSet(cmd.Env, "FIL_PROOFS_USE_GPU_TREE_BUILDER") {
cmd.Env = append(cmd.Env, "FIL_PROOFS_USE_GPU_TREE_BUILDER=1")
}
defer func() { _ = os.RemoveAll(tmpDir) }()
lw := NewLogWriter(logctx, os.Stderr)
cmd.Stderr = lw
cmd.Stdout = os.Stdout
outFile, err := os.CreateTemp("", "out")
if err != nil {
return nil, err
}
cmd.ExtraFiles = []*os.File{outFile}
var encArgs bytes.Buffer
err = gob.NewEncoder(&encArgs).Encode(FFICall{
Fn: fn,
Args: args,
})
if err != nil {
return nil, xerrors.Errorf("subprocess caller cannot encode: %w", err)
}
cmd.Stdin = &encArgs
err = cmd.Run()
if err != nil {
return nil, err
}
// seek to start
if _, err := outFile.Seek(0, io.SeekStart); err != nil {
return nil, xerrors.Errorf("failed to seek to beginning of output file: %w", err)
}
var ve ValErr
err = gob.NewDecoder(outFile).Decode(&ve)
if err != nil {
return nil, xerrors.Errorf("subprocess caller cannot decode: %w", err)
}
if ve.Err != "" {
return nil, xerrors.Errorf("subprocess failure: %s", ve.Err)
}
if ve.Val[len(ve.Val)-1].(ffidirect.ErrorString) != "" {
return nil, xerrors.Errorf("subprocess call error: %s", ve.Val[len(ve.Val)-1].(ffidirect.ErrorString))
}
return ve.Val, nil
}
///////////Funcs reachable by the GPU selector.///////////
// NOTE: Changes here MUST also change ffi-direct.go
type FFISelect struct{}
func (FFISelect) GenerateSinglePartitionWindowPoStWithVanilla(
proofType abi.RegisteredPoStProof,
minerID abi.ActorID,
randomness abi.PoStRandomness,
proofs [][]byte,
partitionIndex uint,
) (*ffi.PartitionProof, error) {
logctx := []any{"spid", minerID, "proof_count", len(proofs), "partition_index", partitionIndex}
val, err := call(logctx, "GenerateSinglePartitionWindowPoStWithVanilla", proofType, minerID, randomness, proofs, partitionIndex)
if err != nil {
return nil, err
}
return val[0].(*ffi.PartitionProof), nil
}
func (FFISelect) SealPreCommitPhase2(
sid abi.SectorID,
phase1Output []byte,
cacheDirPath string,
sealedSectorPath string,
) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) {
logctx := []any{"sector", sid}
val, err := call(logctx, "SealPreCommitPhase2", phase1Output, cacheDirPath, sealedSectorPath)
if err != nil {
return cid.Undef, cid.Undef, err
}
return val[0].(cid.Cid), val[1].(cid.Cid), nil
}
func (FFISelect) SealCommitPhase2(
phase1Output []byte,
sectorNum abi.SectorNumber,
minerID abi.ActorID,
) ([]byte, error) {
logctx := []any{"sector", abi.SectorID{Miner: minerID, Number: sectorNum}}
val, err := call(logctx, "SealCommitPhase2", phase1Output, sectorNum, minerID)
if err != nil {
return nil, err
}
return val[0].([]byte), nil
}
func (FFISelect) GenerateWinningPoStWithVanilla(
proofType abi.RegisteredPoStProof,
minerID abi.ActorID,
randomness abi.PoStRandomness,
proofs [][]byte,
) ([]proof.PoStProof, error) {
logctx := []any{"proof_type", proofType, "miner_id", minerID}
val, err := call(logctx, "GenerateWinningPoStWithVanilla", proofType, minerID, randomness, proofs)
if err != nil {
return nil, err
}
return val[0].([]proof.PoStProof), nil
}
func (FFISelect) SelfTest(val1 int, val2 cid.Cid) (int, cid.Cid, error) {
val, err := call([]any{"selftest", "true"}, "SelfTest", val1, val2)
if err != nil {
return 0, cid.Undef, err
}
return val[0].(int), val[1].(cid.Cid), nil
}
// //////////////////////////
func init() {
registeredTypes := []any{
ValErr{},
FFICall{},
cid.Cid{},
abi.RegisteredPoStProof(0),
abi.ActorID(0),
abi.PoStRandomness{},
abi.SectorNumber(0),
ffi.PartitionProof{},
proof.PoStProof{},
abi.RegisteredPoStProof(0),
}
var registeredTypeNames = make(map[string]struct{})
// Ensure every argument and return type used by the FFI methods is registered:
// this check panics at init during normal (happy-path) runs if a type is
// missing, so problems are caught in development and never impact curio users.
for _, t := range registeredTypes {
gob.Register(t)
registeredTypeNames[reflect.TypeOf(t).PkgPath()+"."+reflect.TypeOf(t).Name()] = struct{}{}
}
to := reflect.TypeOf(ffidirect.FFI{})
for m := 0; m < to.NumMethod(); m++ {
tm := to.Method(m)
tf := tm.Func
for i := 1; i < tf.Type().NumIn(); i++ { // skipping first arg (struct type)
in := tf.Type().In(i)
nm := in.PkgPath() + "." + in.Name()
if _, ok := registeredTypeNames[nm]; in.PkgPath() != "" && !ok { // built-ins ok
panic("ffiSelect: unregistered type: " + nm + " from " + tm.Name + " arg: " + strconv.Itoa(i))
}
}
for i := 0; i < tf.Type().NumOut(); i++ {
out := tf.Type().Out(i)
nm := out.PkgPath() + "." + out.Name()
if _, ok := registeredTypeNames[nm]; out.PkgPath() != "" && !ok { // built-ins ok
panic("ffiSelect: unregistered type: " + nm + " from " + tm.Name + " arg: " + strconv.Itoa(i))
}
}
}
}

View File

@ -1,88 +0,0 @@
package ffiselect
import (
"bufio"
"bytes"
"io"
"regexp"
"strings"
"time"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/zap"
)
var log = logging.Logger("ffiselect")
type LogWriter struct {
ctx []any
errOut io.Writer
re *regexp.Regexp
}
func NewLogWriter(logctx []any, errOut io.Writer) *LogWriter {
re := regexp.MustCompile(`^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3})\s+(\w+)\s+(.*)$`)
return &LogWriter{
ctx: logctx,
errOut: errOut,
re: re,
}
}
func (lw *LogWriter) Write(p []byte) (n int, err error) {
reader := bufio.NewReader(bytes.NewReader(p))
for {
line, err := reader.ReadBytes('\n')
if err == io.EOF {
break
}
if err != nil {
return 0, err
}
lineStr := string(line)
// trim trailing \n
lineStr = strings.TrimSpace(lineStr)
matches := lw.re.FindStringSubmatch(lineStr)
if matches == nil {
// Line didn't match the expected format, write it to stderr as-is
_, err := lw.errOut.Write(line)
if err != nil {
return 0, err
}
continue
}
timestamp, logLevel, message := matches[1], matches[2], matches[3]
logTime, err := time.Parse("2006-01-02T15:04:05.000", timestamp)
if err != nil {
_, err := lw.errOut.Write(line)
if err != nil {
return 0, err
}
continue
}
var zapLevel zap.AtomicLevel
switch logLevel {
case "DEBUG":
zapLevel = zap.NewAtomicLevelAt(zap.DebugLevel)
case "INFO":
zapLevel = zap.NewAtomicLevelAt(zap.InfoLevel)
case "WARN":
zapLevel = zap.NewAtomicLevelAt(zap.WarnLevel)
case "ERROR":
zapLevel = zap.NewAtomicLevelAt(zap.ErrorLevel)
default:
_, err := lw.errOut.Write(line)
if err != nil {
return 0, err
}
continue
}
log.With(zap.Time("timestamp", logTime)).Logw(zapLevel.Level(), message, lw.ctx...)
}
return len(p), nil
}
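// Illustrative sketch (added; not original code): feed one rust-fil-proofs
// style line through the writer; lines that don't match the expected format
// fall through unchanged to errOut (io.Discard here).
func exampleLogWriter() {
	lw := NewLogWriter([]any{"sector", 1}, io.Discard)
	_, _ = lw.Write([]byte("2024-05-27T19:01:56.000 INFO generating tree-r\n"))
}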

View File

@ -1,27 +0,0 @@
package ffiselect
import (
"reflect"
"github.com/samber/lo"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/ffiselect/ffidirect"
)
func callTest(logctx []any, fn string, rawargs ...interface{}) ([]interface{}, error) {
args := lo.Map(rawargs, func(arg any, i int) reflect.Value {
return reflect.ValueOf(arg)
})
resAry := reflect.ValueOf(ffidirect.FFI{}).MethodByName(fn).Call(args)
res := lo.Map(resAry, func(res reflect.Value, i int) any {
return res.Interface()
})
if res[len(res)-1].(ffidirect.ErrorString) != "" {
return nil, xerrors.Errorf("callTest error: %s", res[len(res)-1].(ffidirect.ErrorString))
}
return res, nil
}

View File

@ -1,288 +0,0 @@
package gc
import (
"context"
"strings"
"sync"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/samber/lo"
"golang.org/x/xerrors"
harmonytask2 "github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/result"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("curiogc")
const StorageEndpointGCInterval = 21 * time.Minute
const StorageEndpointDeadTime = StorageEndpointGCInterval * 6 // ~2h
const MaxParallelEndpointChecks = 32
type StorageEndpointGC struct {
si *paths.DBIndex
remote *paths.Remote
db *harmonydb.DB
}
func NewStorageEndpointGC(si *paths.DBIndex, remote *paths.Remote, db *harmonydb.DB) *StorageEndpointGC {
return &StorageEndpointGC{
si: si,
remote: remote,
db: db,
}
}
func (s *StorageEndpointGC) Do(taskID harmonytask2.TaskID, stillOwned func() bool) (done bool, err error) {
/*
1. Get all storage paths + urls (endpoints)
2. Ping each url, record results
3. Update sector_path_url_liveness with success/failure
4.1 If a URL was consistently down for StorageEndpointDeadTime, remove it from the storage_path table
4.2 Remove storage paths with no URLs remaining
4.2.1 in the same transaction remove sector refs to the dead path
*/
ctx := context.Background()
var pathRefs []struct {
StorageID storiface.ID `db:"storage_id"`
Urls string `db:"urls"`
LastHeartbeat *time.Time `db:"last_heartbeat"`
}
err = s.db.Select(ctx, &pathRefs, `SELECT storage_id, urls, last_heartbeat FROM storage_path`)
if err != nil {
return false, xerrors.Errorf("getting path metadata: %w", err)
}
type pingResult struct {
storageID storiface.ID
url string
res result.Result[fsutil.FsStat]
}
var pingResults []pingResult
var resultLk sync.Mutex
var resultThrottle = make(chan struct{}, MaxParallelEndpointChecks)
for _, pathRef := range pathRefs {
pathRef := pathRef
urls := strings.Split(pathRef.Urls, paths.URLSeparator)
for _, url := range urls {
url := url
select {
case resultThrottle <- struct{}{}:
case <-ctx.Done():
return false, ctx.Err()
}
go func() {
defer func() {
<-resultThrottle
}()
st, err := s.remote.StatUrl(ctx, url, pathRef.StorageID)
res := pingResult{
storageID: pathRef.StorageID,
url: url,
res: result.Wrap(st, err),
}
resultLk.Lock()
pingResults = append(pingResults, res)
resultLk.Unlock()
}()
}
}
// Wait for all pings to finish
for i := 0; i < MaxParallelEndpointChecks; i++ {
select {
case resultThrottle <- struct{}{}:
case <-ctx.Done():
return false, ctx.Err()
}
}
// Update the liveness table
/*
create table sector_path_url_liveness (
storage_id text,
url text,
last_checked timestamp not null,
last_live timestamp,
last_dead timestamp,
last_dead_reason text,
primary key (storage_id, url),
foreign key (storage_id) references storage_path (storage_id) on delete cascade
)
*/
currentTime := time.Now().UTC()
committed, err := s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) {
for _, pingResult := range pingResults {
var lastLive, lastDead, lastDeadReason interface{}
if pingResult.res.Error == nil {
lastLive = currentTime.UTC()
lastDead = nil
lastDeadReason = nil
} else {
lastLive = nil
lastDead = currentTime.UTC()
lastDeadReason = pingResult.res.Error.Error()
}
// This statement updates the liveness data for a URL in the `sector_path_url_liveness` table.
//
// On conflict, where the same `storage_id` and `url` are found:
// - last_checked is always updated to the current timestamp.
// - last_live is updated to the new `last_live` if it is not null; otherwise, it retains the existing value.
// - last_dead is conditionally updated as follows:
// 1. It is set to the new `last_dead` if the existing `last_dead` is null (indicating this is the first recorded failure).
// 2. It is updated to the new `last_dead` if there has been a live instance recorded after the most recent dead timestamp, indicating the resource was alive again before this new failure.
// 3. It retains the existing value if none of the above conditions are met.
// - last_dead_reason is updated similarly to `last_live`, using COALESCE to prefer the new reason if it's provided.
_, err := tx.Exec(`
INSERT INTO sector_path_url_liveness (storage_id, url, last_checked, last_live, last_dead, last_dead_reason)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (storage_id, url) DO UPDATE
SET last_checked = EXCLUDED.last_checked,
last_live = COALESCE(EXCLUDED.last_live, sector_path_url_liveness.last_live),
last_dead = CASE
WHEN sector_path_url_liveness.last_dead IS NULL THEN EXCLUDED.last_dead
WHEN sector_path_url_liveness.last_dead IS NOT NULL AND sector_path_url_liveness.last_live > sector_path_url_liveness.last_dead THEN EXCLUDED.last_dead
ELSE sector_path_url_liveness.last_dead
END,
last_dead_reason = COALESCE(EXCLUDED.last_dead_reason, sector_path_url_liveness.last_dead_reason)
`, pingResult.storageID, pingResult.url, currentTime, lastLive, lastDead, lastDeadReason)
if err != nil {
return false, xerrors.Errorf("updating liveness data: %w", err)
}
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return false, xerrors.Errorf("sector_path_url_liveness update: %w", err)
}
if !committed {
return false, xerrors.Errorf("sector_path_url_liveness update: transaction didn't commit")
}
///////
// Now we do the actual database cleanup
if !stillOwned() {
return false, xerrors.Errorf("task no longer owned")
}
committed, err = s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) {
// Identify URLs that are consistently down
var deadURLs []struct {
StorageID storiface.ID
URL string
}
err = tx.Select(&deadURLs, `
SELECT storage_id, url FROM sector_path_url_liveness
WHERE last_dead > COALESCE(last_live, '1970-01-01') AND last_dead < $1
`, currentTime.Add(-StorageEndpointDeadTime).UTC())
if err != nil {
return false, xerrors.Errorf("selecting dead URLs: %w", err)
}
log.Debugw("dead urls", "dead_urls", deadURLs)
// Remove dead URLs from storage_path entries and handle path cleanup
for _, du := range deadURLs {
du := du
// Fetch the current URLs for the storage path
var URLs string
err = tx.QueryRow("SELECT urls FROM storage_path WHERE storage_id = $1", du.StorageID).Scan(&URLs)
if err != nil {
return false, xerrors.Errorf("fetching storage paths: %w", err)
}
// Filter out the dead URL using lo.Reject and prepare the updated list
urls := strings.Split(URLs, paths.URLSeparator)
urls = lo.Reject(urls, func(u string, _ int) bool {
return u == du.URL
})
log.Debugw("filtered urls", "urls", urls, "dead_url", du.URL, "storage_id", du.StorageID)
if len(urls) == 0 {
// If no URLs left, remove the storage path entirely
_, err = tx.Exec("DELETE FROM storage_path WHERE storage_id = $1", du.StorageID)
if err != nil {
return false, xerrors.Errorf("deleting storage path: %w", err)
}
_, err = tx.Exec("DELETE FROM sector_location WHERE storage_id = $1", du.StorageID)
if err != nil {
return false, xerrors.Errorf("deleting sector locations: %w", err)
}
} else {
// Update the storage path with the filtered URLs
newURLs := strings.Join(urls, paths.URLSeparator)
_, err = tx.Exec("UPDATE storage_path SET urls = $1 WHERE storage_id = $2", newURLs, du.StorageID)
if err != nil {
return false, xerrors.Errorf("updating storage path urls: %w", err)
}
// Remove sector_path_url_liveness entry
_, err = tx.Exec("DELETE FROM sector_path_url_liveness WHERE storage_id = $1 AND url = $2", du.StorageID, du.URL)
if err != nil {
return false, xerrors.Errorf("deleting sector_path_url_liveness entry: %w", err)
}
}
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return false, xerrors.Errorf("removing dead URLs and cleaning storage paths: %w", err)
}
if !committed {
return false, xerrors.Errorf("transaction for removing dead URLs and cleaning paths did not commit")
}
return true, nil
}
func (s *StorageEndpointGC) CanAccept(ids []harmonytask2.TaskID, engine *harmonytask2.TaskEngine) (*harmonytask2.TaskID, error) {
id := ids[0]
return &id, nil
}
func (s *StorageEndpointGC) TypeDetails() harmonytask2.TaskTypeDetails {
return harmonytask2.TaskTypeDetails{
Max: 1,
Name: "StorageMetaGC",
Cost: resources.Resources{
Cpu: 1,
Ram: 64 << 20,
Gpu: 0,
},
IAmBored: harmonytask2.SingletonTaskAdder(StorageEndpointGCInterval, s),
}
}
func (s *StorageEndpointGC) Adder(taskFunc harmonytask2.AddTaskFunc) {
// lazy endpoint, added when bored
return
}
var _ harmonytask2.TaskInterface = &StorageEndpointGC{}

View File

@ -1,71 +0,0 @@
/*
Package harmonytask implements a pure (no task logic), distributed
task manager. This clean interface allows a task implementer to completely
avoid being concerned with task scheduling and management.
It's based on the idea of tasks as small units of work broken from other
work by hardware, parallelizability, reliability, or any other reason.
Workers will be Greedy: vacuuming up their favorite jobs from a list.
Once 1 task is accepted, harmonydb tries to get other task runner
machines to accept work (round robin) before trying again to accept.
*
Mental Model:
Things that block tasks:
- task not registered for any running server
- max was specified and reached
- resource exhaustion
- CanAccept() interface (per-task implementation) does not accept it.
Ways tasks start:
- DB Read every 3 seconds
- Task was added (to db) by this process
Ways tasks get added:
- Async Listener task (for chain, etc)
- Followers: Tasks get added because another task completed
When Follower collectors run:
- If both sides are process-local, then this process will pick it up.
- If properly registered already, the http endpoint will be tried to start it.
- Otherwise, at the listen interval during db scrape it will be found.
How duplicate tasks are avoided:
- that's up to the task definition, but probably a unique key
*
To use:
1. Implement TaskInterface for a new task.
2. Have New() receive this & all other ACTIVE implementations.
*
*
As we are not expecting DBAs in this database, it's important to know
what grows uncontrolled. The only growing harmony_* table is
harmony_task_history (somewhat quickly). It will need a
clean-up once the task data can no longer be acted upon,
but the design **requires** extraInfo tables to grow until the task's
info could not possibly be used by a following task, including slow
release rollout. This would normally be on the order of months.
*
Other possible enhancements include more collaborative coordination
to assign a task to machines closer to the data.
__Database_Behavior__
harmony_task is the list of work that has not been completed.
AddTaskFunc manages the additions, but is designed to have its
transactions failed-out on overlap with a similar task already written.
It's up to the TaskInterface implementer to discover this overlap via
some other table it uses (since overlap can mean very different things).
harmony_task_history
This holds tasks that completed or saw too many retries. It also
serves as input for subsequent (follower) tasks to kick off. This is not
done machine-internally because a follower may not be on the same machine
as the previous task.
harmony_task_machines
Managed by lib/harmony/resources, this is a reference to machines registered
via the resources. This registration does not obligate the machine to
anything, but serves as a discovery mechanism. Paths are hostnames + ports
which are presumed to support http, but this assumption is only used by
the task system.
*/
package harmonytask
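// A minimal sketch of the two "To use" steps above. Everything named here
// (the HelloTask type, its behavior, the listen address) is an illustrative
// assumption, not code from this repository.
package example

import (
	"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
	"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

type HelloTask struct{}

// Do performs the work; stillOwned guards single-writer-only changes.
func (t *HelloTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
	if !stillOwned() {
		return false, nil // ownership was lost; let another machine finish
	}
	// ... the actual work would go here ...
	return true, nil
}

// CanAccept picks which offered task, if any, this machine wants to run.
func (t *HelloTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
	id := ids[0]
	return &id, nil
}

func (t *HelloTask) TypeDetails() harmonytask.TaskTypeDetails {
	return harmonytask.TaskTypeDetails{
		Max:  1,
		Name: "HelloTask", // must be 16 characters or fewer
		Cost: resources.Resources{Cpu: 1, Ram: 32 << 20},
	}
}

// Adder would push new work from an external trigger; this sketch adds none.
func (t *HelloTask) Adder(add harmonytask.AddTaskFunc) {}

var _ harmonytask.TaskInterface = &HelloTask{}

// Step 2: hand every ACTIVE implementation to New().
func startEngine(db *harmonydb.DB) (*harmonytask.TaskEngine, error) {
	return harmonytask.New(db, []harmonytask.TaskInterface{&HelloTask{}}, "127.0.0.1:12300")
}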

View File

@ -1,392 +0,0 @@
package harmonytask
import (
"context"
"fmt"
"strconv"
"sync/atomic"
"time"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
// Consts (except for unit test)
var POLL_DURATION = time.Second * 3 // Poll for Work this frequently
var POLL_NEXT_DURATION = 100 * time.Millisecond // After scheduling a task, wait this long before scheduling another
var CLEANUP_FREQUENCY = 5 * time.Minute // Check for dead workers this often * everyone
var FOLLOW_FREQUENCY = 1 * time.Minute // Check for work to follow this often
type TaskTypeDetails struct {
// Max returns how many tasks this machine can run of this type.
// Zero (default) or less means unrestricted.
Max int
// Name is the task name to be added to the task list.
Name string
// Peak costs to Do() the task.
Cost resources.Resources
// Max Failure count before the job is dropped.
// 0 = retry forever
MaxFailures uint
// Follow another task's completion via this task's creation.
// The function should populate extraInfo from data
// available from the previous task's tables, using the given TaskID.
// It should also return success if the trigger succeeded.
// NOTE: if refactoring tasks, check whether your task is still
// necessary. Ex: Is the sector state correct for your stage to run?
Follows map[string]func(TaskID, AddTaskFunc) (bool, error)
// IAmBored is called (when populated) when there's capacity but no work.
// Tasks added will be proposed to CanAccept() on this machine.
// CanAccept() can read taskEngine's WorkOrigin string to learn about a task.
// Ex: make new CC sectors, clean-up, or retrying pipelines that failed in later states.
IAmBored func(AddTaskFunc) error
}
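// A sketch of populating Follows (hypothetical task and table names, assumed
// to live in this package): whenever a "FooTask" completes, enqueue a "BarTask"
// recording which FooTask it follows; a unique index on
// bar_task_details(foo_task_id) is assumed to provide de-duplication.
func exampleFollowerDetails() TaskTypeDetails {
	return TaskTypeDetails{
		Name: "BarTask",
		Cost: resources.Resources{Cpu: 1, Ram: 16 << 20},
		Follows: map[string]func(TaskID, AddTaskFunc) (bool, error){
			"FooTask": func(fooID TaskID, add AddTaskFunc) (bool, error) {
				add(func(barID TaskID, tx *harmonydb.Tx) (bool, error) {
					_, err := tx.Exec(`INSERT INTO bar_task_details (task_id, foo_task_id)
						VALUES ($1, $2)`, barID, fooID)
					return err == nil, err
				})
				return true, nil
			},
		},
	}
}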
// TaskInterface must be implemented in order to have a task used by harmonytask.
type TaskInterface interface {
// Do the task assigned. Call stillOwned before making single-writer-only
// changes to ensure the work has not been stolen.
// This is the ONLY function that should attempt to do the work, and must
// ONLY be called by harmonytask.
// Indicate if the task no longer needs scheduling with done=true, including
// cases where it's past the deadline.
Do(taskID TaskID, stillOwned func() bool) (done bool, err error)
// CanAccept should return if the task can run on this machine. It should
// return nil if the task type is not allowed on this machine.
// It should select the task it most wants to accomplish.
// It is also responsible for determining & reserving disk space (including scratch).
CanAccept([]TaskID, *TaskEngine) (*TaskID, error)
// TypeDetails() returns static details about how this task behaves and
// how this machine will run it. Read once at the beginning.
TypeDetails() TaskTypeDetails
// This listener will consume all external sources continuously for work.
// Do() may also be called from a backlog of work. This must not
// start doing the work (it still must be scheduled).
// Note: Task de-duplication should happen in ExtraInfoFunc by
// returning false, typically by determining from the tx that the work
// exists already. The easy way is to have a unique joint index
// across all fields that will be common.
// Adder should typically only add its own task type, but multiple
// is possible for when 1 trigger starts 2 things.
// Usage Example:
// func (b *BazType)Adder(addTask AddTaskFunc) {
// for {
// bazMaker := <- bazChannel
// addTask("baz", func(t harmonytask.TaskID, txn db.Transaction) (bool, error) {
// _, err := txn.Exec(`INSERT INTO bazInfoTable (taskID, qix, mot)
// VALUES ($1,$2,$3)`, id, bazMaker.qix, bazMaker.mot)
// if err != nil {
// scream(err)
// return false
// }
// return true
// })
// }
// }
Adder(AddTaskFunc)
}
// AddTaskFunc is responsible for adding a task's details "extra info" to the DB.
// It should return true if the task should be added, false if it was already there.
// This is typically accomplished with a "unique" index on your details table that
// would cause the insert to fail.
// The error indicates that, instead of a conflict (which we should ignore), we
// actually have a serious problem that needs to be logged with context.
type AddTaskFunc func(extraInfo func(TaskID, *harmonydb.Tx) (shouldCommit bool, seriousError error))
type TaskEngine struct {
// Static After New()
ctx context.Context
handlers []*taskTypeHandler
db *harmonydb.DB
reg *resources.Reg
grace context.CancelFunc
taskMap map[string]*taskTypeHandler
ownerID int
follows map[string][]followStruct
hostAndPort string
// synchronous to the single-threaded poller
lastFollowTime time.Time
lastCleanup atomic.Value
WorkOrigin string
}
type followStruct struct {
f func(TaskID, AddTaskFunc) (bool, error)
h *taskTypeHandler
name string
}
type TaskID int
// New creates all the task definitions. Note that TaskEngine
// knows nothing about the tasks themselves and serves as a
// generic container for common work.
func New(
db *harmonydb.DB,
impls []TaskInterface,
hostnameAndPort string) (*TaskEngine, error) {
reg, err := resources.Register(db, hostnameAndPort)
if err != nil {
return nil, fmt.Errorf("cannot get resources: %w", err)
}
ctx, grace := context.WithCancel(context.Background())
e := &TaskEngine{
ctx: ctx,
grace: grace,
db: db,
reg: reg,
ownerID: reg.Resources.MachineID, // The current number representing "hostAndPort"
taskMap: make(map[string]*taskTypeHandler, len(impls)),
follows: make(map[string][]followStruct),
hostAndPort: hostnameAndPort,
}
e.lastCleanup.Store(time.Now())
for _, c := range impls {
h := taskTypeHandler{
TaskInterface: c,
TaskTypeDetails: c.TypeDetails(),
TaskEngine: e,
}
if len(h.Name) > 16 {
return nil, fmt.Errorf("task name too long: %s, max 16 characters", h.Name)
}
e.handlers = append(e.handlers, &h)
e.taskMap[h.TaskTypeDetails.Name] = &h
}
// resurrect old work
{
var taskRet []struct {
ID int
Name string
}
err := db.Select(e.ctx, &taskRet, `SELECT id, name from harmony_task WHERE owner_id=$1`, e.ownerID)
if err != nil {
return nil, err
}
for _, w := range taskRet {
// edge-case: if old assignments are not available tasks, unlock them.
h := e.taskMap[w.Name]
if h == nil {
// Unknown task type on this machine: disown it so another machine can pick it up,
// and skip it here (calling considerWork on a nil handler would panic).
_, err := db.Exec(e.ctx, `UPDATE harmony_task SET owner_id=NULL WHERE id=$1`, w.ID)
if err != nil {
log.Errorw("Cannot remove self from owner field", "error", err)
}
continue // not really fatal, but not great
}
if !h.considerWork(WorkSourceRecover, []TaskID{TaskID(w.ID)}) {
log.Errorw("Strange: Unable to accept previously owned task", "id", w.ID, "type", w.Name)
}
}
}
for _, h := range e.handlers {
go h.Adder(h.AddTask)
}
go e.poller()
return e, nil
}
// GracefullyTerminate hangs until all present tasks have completed.
// Call this to cleanly exit the process. Long-running PoSt tasks
// defer the shutdown until they have finished.
func (e *TaskEngine) GracefullyTerminate() {
// call the cancel func to avoid picking up any new tasks. Running tasks have context.Background()
// Call shutdown to stop posting heartbeat to DB.
e.grace()
e.reg.Shutdown()
// If there are any Post tasks then wait till Timeout and check again
// When no Post tasks are active, break out of loop and call the shutdown function
for {
timeout := time.Millisecond
for _, h := range e.handlers {
if h.TaskTypeDetails.Name == "WinPost" && h.Count.Load() > 0 {
timeout = time.Second
log.Infof("node shutdown deferred for %f seconds", timeout.Seconds())
continue
}
if h.TaskTypeDetails.Name == "WdPost" && h.Count.Load() > 0 {
timeout = time.Second * 3
log.Infof("node shutdown deferred for %f seconds due to running WdPost task", timeout.Seconds())
continue
}
if h.TaskTypeDetails.Name == "WdPostSubmit" && h.Count.Load() > 0 {
timeout = time.Second
log.Infof("node shutdown deferred for %f seconds due to running WdPostSubmit task", timeout.Seconds())
continue
}
if h.TaskTypeDetails.Name == "WdPostRecover" && h.Count.Load() > 0 {
timeout = time.Second
log.Infof("node shutdown deferred for %f seconds due to running WdPostRecover task", timeout.Seconds())
continue
}
// Test tasks for itest
if h.TaskTypeDetails.Name == "ThingOne" && h.Count.Load() > 0 {
timeout = time.Second
log.Infof("node shutdown deferred for %f seconds due to running itest task", timeout.Seconds())
continue
}
}
if timeout > time.Millisecond {
time.Sleep(timeout)
continue
}
break
}
return
}
func (e *TaskEngine) poller() {
nextWait := POLL_NEXT_DURATION
for {
select {
case <-time.After(nextWait): // Find work periodically
case <-e.ctx.Done(): ///////////////////// Graceful exit
return
}
nextWait = POLL_DURATION
accepted := e.pollerTryAllWork()
if accepted {
nextWait = POLL_NEXT_DURATION
}
if time.Since(e.lastFollowTime) > FOLLOW_FREQUENCY {
e.followWorkInDB()
}
}
}
// followWorkInDB implements "Follows"
func (e *TaskEngine) followWorkInDB() {
// Step 1: What are we following?
var lastFollowTime time.Time
lastFollowTime, e.lastFollowTime = e.lastFollowTime, time.Now()
for fromName, srcs := range e.follows {
var cList []int // Which work is done (that we follow) since we last checked?
err := e.db.Select(e.ctx, &cList, `SELECT h.task_id FROM harmony_task_history
WHERE h.work_end>$1 AND h.name=$2`, lastFollowTime.UTC(), fromName)
if err != nil {
log.Error("Could not query DB: ", err)
return
}
for _, src := range srcs {
for _, workAlreadyDone := range cList { // Were any tasks made to follow these tasks?
var ct int
err := e.db.QueryRow(e.ctx, `SELECT COUNT(*) FROM harmony_task
WHERE name=$1 AND previous_task=$2`, src.h.Name, workAlreadyDone).Scan(&ct)
if err != nil {
log.Error("Could not query harmony_task: ", err)
return // not recoverable here
}
if ct > 0 {
continue
}
// we need to create this task
b, err := src.h.Follows[fromName](TaskID(workAlreadyDone), src.h.AddTask)
if err != nil {
log.Errorw("Could not follow: ", "error", err)
continue
}
if !b {
// But someone may have beaten us to it.
log.Debugf("Unable to add task %s following Task(%d, %s)", src.h.Name, workAlreadyDone, fromName)
}
}
}
}
}
// pollerTryAllWork starts the next 1 task
func (e *TaskEngine) pollerTryAllWork() bool {
if time.Since(e.lastCleanup.Load().(time.Time)) > CLEANUP_FREQUENCY {
e.lastCleanup.Store(time.Now())
resources.CleanupMachines(e.ctx, e.db)
}
for _, v := range e.handlers {
if err := v.AssertMachineHasCapacity(); err != nil {
log.Debugf("skipped scheduling %s type tasks on due to %s", v.Name, err.Error())
continue
}
var unownedTasks []TaskID
err := e.db.Select(e.ctx, &unownedTasks, `SELECT id
FROM harmony_task
WHERE owner_id IS NULL AND name=$1
ORDER BY update_time`, v.Name)
if err != nil {
log.Error("Unable to read work ", err)
continue
}
if len(unownedTasks) > 0 {
accepted := v.considerWork(WorkSourcePoller, unownedTasks)
if accepted {
return true // accept new work slowly and in priority order
}
log.Warn("Work not accepted for " + strconv.Itoa(len(unownedTasks)) + " " + v.Name + " task(s)")
}
}
// if no work was accepted, are we bored? Then find work in priority order.
for _, v := range e.handlers {
v := v
if v.AssertMachineHasCapacity() != nil {
continue
}
if v.TaskTypeDetails.IAmBored != nil {
var added []TaskID
err := v.TaskTypeDetails.IAmBored(func(extraInfo func(TaskID, *harmonydb.Tx) (shouldCommit bool, seriousError error)) {
v.AddTask(func(tID TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
b, err := extraInfo(tID, tx)
if err == nil {
added = append(added, tID)
}
return b, err
})
})
if err != nil {
log.Error("IAmBored failed: ", err)
continue
}
if added != nil { // tiny chance a fail could make these bogus, but considerWork should then fail.
v.considerWork(WorkSourceIAmBored, added)
}
}
}
return false
}
// ResourcesAvailable determines what resources are still unassigned.
func (e *TaskEngine) ResourcesAvailable() resources.Resources {
tmp := e.reg.Resources
for _, t := range e.handlers {
ct := t.Count.Load()
tmp.Cpu -= int(ct) * t.Cost.Cpu
tmp.Gpu -= float64(ct) * t.Cost.Gpu
tmp.Ram -= uint64(ct) * t.Cost.Ram
}
return tmp
}
// Resources returns the resources available in the TaskEngine's registry.
func (e *TaskEngine) Resources() resources.Resources {
return e.reg.Resources
}

View File

@ -1,52 +0,0 @@
package harmonytask
import (
"errors"
"time"
"github.com/yugabyte/pgx/v5"
"github.com/filecoin-project/lotus/curiosrc/lib/passcall"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
func SingletonTaskAdder(minInterval time.Duration, task TaskInterface) func(AddTaskFunc) error {
return passcall.Every(minInterval, func(add AddTaskFunc) error {
taskName := task.TypeDetails().Name
add(func(taskID TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) {
var existingTaskID *int64
var lastRunTime time.Time
// Query to check the existing task entry
err = tx.QueryRow(`SELECT task_id, last_run_time FROM harmony_task_singletons WHERE task_name = $1`, taskName).Scan(&existingTaskID, &lastRunTime)
if err != nil {
if !errors.Is(err, pgx.ErrNoRows) {
return false, err // return error if query failed and it's not because of missing row
}
}
now := time.Now().UTC()
// Determine if the task should run based on the absence of a record or outdated last_run_time
shouldRun := err == pgx.ErrNoRows || (existingTaskID == nil && lastRunTime.Add(minInterval).Before(now))
if !shouldRun {
return false, nil
}
// Conditionally insert or update the task entry
n, err := tx.Exec(`
INSERT INTO harmony_task_singletons (task_name, task_id, last_run_time)
VALUES ($1, $2, $3)
ON CONFLICT (task_name) DO UPDATE
SET task_id = COALESCE(harmony_task_singletons.task_id, $2),
last_run_time = $3
WHERE harmony_task_singletons.task_id IS NULL
`, taskName, taskID, now)
if err != nil {
return false, err
}
return n > 0, nil
})
return nil
})
}

View File

@ -1,296 +0,0 @@
package harmonytask
import (
"context"
"errors"
"fmt"
"runtime"
"strconv"
"sync/atomic"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
var log = logging.Logger("harmonytask")
type taskTypeHandler struct {
TaskInterface
TaskTypeDetails
TaskEngine *TaskEngine
Count atomic.Int32
}
func (h *taskTypeHandler) AddTask(extra func(TaskID, *harmonydb.Tx) (bool, error)) {
var tID TaskID
retryWait := time.Millisecond * 100
retryAddTask:
_, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) {
// create taskID (from DB)
err := tx.QueryRow(`INSERT INTO harmony_task (name, added_by, posted_time)
VALUES ($1, $2, CURRENT_TIMESTAMP) RETURNING id`, h.Name, h.TaskEngine.ownerID).Scan(&tID)
if err != nil {
return false, fmt.Errorf("could not insert into harmonyTask: %w", err)
}
return extra(tID, tx)
})
if err != nil {
if harmonydb.IsErrUniqueContraint(err) {
log.Debugf("addtask(%s) saw unique constraint, so it's added already.", h.Name)
return
}
if harmonydb.IsErrSerialization(err) {
time.Sleep(retryWait)
retryWait *= 2
goto retryAddTask
}
log.Errorw("Could not add task. AddTasFunc failed", "error", err, "type", h.Name)
return
}
}
const (
WorkSourcePoller = "poller"
WorkSourceRecover = "recovered"
WorkSourceIAmBored = "bored"
)
// considerWork is called to attempt to start work on a task-id of this task type.
// It presumes single-threaded calling, so there should not be a multi-threaded re-entry.
// The only caller should be the one work poller thread. This does spin off other threads,
// but those should not considerWork. Work completing may lower the resource numbers
// unexpectedly, but that will not invalidate work being already able to fit.
func (h *taskTypeHandler) considerWork(from string, ids []TaskID) (workAccepted bool) {
top:
if len(ids) == 0 {
return true // stop looking for takers
}
// 1. Can we do any more of this task type?
// NOTE: 0 is the default value, so this way people don't need to worry about
// this setting unless they want to limit the number of tasks of this type.
if h.Max > 0 && int(h.Count.Load()) >= h.Max {
log.Debugw("did not accept task", "name", h.Name, "reason", "at max already")
return false
}
// 2. Can we do any more work? From here onward, we presume the resource
// story will not change, so single-threaded calling is best.
err := h.AssertMachineHasCapacity()
if err != nil {
log.Debugw("did not accept task", "name", h.Name, "reason", "at capacity already: "+err.Error())
return false
}
h.TaskEngine.WorkOrigin = from
// 3. What does the impl say?
canAcceptAgain:
tID, err := h.CanAccept(ids, h.TaskEngine)
h.TaskEngine.WorkOrigin = ""
if err != nil {
log.Error(err)
return false
}
if tID == nil {
log.Infow("did not accept task", "task_id", ids[0], "reason", "CanAccept() refused", "name", h.Name)
return false
}
releaseStorage := func() {
}
if h.TaskTypeDetails.Cost.Storage != nil {
markComplete, err := h.TaskTypeDetails.Cost.Storage.Claim(int(*tID))
if err != nil {
log.Infow("did not accept task", "task_id", strconv.Itoa(int(*tID)), "reason", "storage claim failed", "name", h.Name, "error", err)
if len(ids) > 1 {
var tryAgain = make([]TaskID, 0, len(ids)-1)
for _, id := range ids {
if id != *tID {
tryAgain = append(tryAgain, id)
}
}
ids = tryAgain
goto canAcceptAgain
}
return false
}
releaseStorage = func() {
if err := markComplete(); err != nil {
log.Errorw("Could not release storage", "error", err)
}
}
}
// if recovering we don't need to try to claim anything because those tasks are already claimed by us
if from != WorkSourceRecover {
// 4. Can we claim the work for our hostname?
ct, err := h.TaskEngine.db.Exec(h.TaskEngine.ctx, "UPDATE harmony_task SET owner_id=$1 WHERE id=$2 AND owner_id IS NULL", h.TaskEngine.ownerID, *tID)
if err != nil {
log.Error(err)
releaseStorage()
return false
}
if ct == 0 {
log.Infow("did not accept task", "task_id", strconv.Itoa(int(*tID)), "reason", "already Taken", "name", h.Name)
releaseStorage()
var tryAgain = make([]TaskID, 0, len(ids)-1)
for _, id := range ids {
if id != *tID {
tryAgain = append(tryAgain, id)
}
}
ids = tryAgain
goto top
}
}
h.Count.Add(1)
go func() {
log.Infow("Beginning work on Task", "id", *tID, "from", from, "name", h.Name)
var done bool
var doErr error
workStart := time.Now()
defer func() {
if r := recover(); r != nil {
stackSlice := make([]byte, 4092)
sz := runtime.Stack(stackSlice, false)
log.Error("Recovered from a serious error "+
"while processing "+h.Name+" task "+strconv.Itoa(int(*tID))+": ", r,
" Stack: ", string(stackSlice[:sz]))
}
h.Count.Add(-1)
releaseStorage()
h.recordCompletion(*tID, workStart, done, doErr)
if done {
for _, fs := range h.TaskEngine.follows[h.Name] { // Do we know of any follows for this task type?
if _, err := fs.f(*tID, fs.h.AddTask); err != nil {
log.Error("Could not follow", "error", err, "from", h.Name, "to", fs.name)
}
}
}
}()
done, doErr = h.Do(*tID, func() bool {
var owner int
// Background here because we don't want GracefulRestart to block this save.
err := h.TaskEngine.db.QueryRow(context.Background(),
`SELECT owner_id FROM harmony_task WHERE id=$1`, *tID).Scan(&owner)
if err != nil {
log.Error("Cannot determine ownership: ", err)
return false
}
return owner == h.TaskEngine.ownerID
})
if doErr != nil {
log.Errorw("Do() returned error", "type", h.Name, "id", strconv.Itoa(int(*tID)), "error", doErr)
}
}()
return true
}
func (h *taskTypeHandler) recordCompletion(tID TaskID, workStart time.Time, done bool, doErr error) {
workEnd := time.Now()
retryWait := time.Millisecond * 100
retryRecordCompletion:
cm, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) {
var postedTime time.Time
err := tx.QueryRow(`SELECT posted_time FROM harmony_task WHERE id=$1`, tID).Scan(&postedTime)
if err != nil {
return false, fmt.Errorf("could not log completion: %w ", err)
}
result := "unspecified error"
if done {
_, err = tx.Exec("DELETE FROM harmony_task WHERE id=$1", tID)
if err != nil {
return false, fmt.Errorf("could not log completion: %w", err)
}
result = ""
if doErr != nil {
result = "non-failing error: " + doErr.Error()
}
} else {
if doErr != nil {
result = "error: " + doErr.Error()
}
var deleteTask bool
if h.MaxFailures > 0 {
ct := uint(0)
err = tx.QueryRow(`SELECT count(*) FROM harmony_task_history
WHERE task_id=$1 AND result=FALSE`, tID).Scan(&ct)
if err != nil {
return false, fmt.Errorf("could not read task history: %w", err)
}
if ct >= h.MaxFailures {
deleteTask = true
}
}
if deleteTask {
_, err = tx.Exec("DELETE FROM harmony_task WHERE id=$1", tID)
if err != nil {
return false, fmt.Errorf("could not delete failed job: %w", err)
}
// Note: Extra Info is left lying around for later review & clean-up
} else {
_, err := tx.Exec(`UPDATE harmony_task SET owner_id=NULL WHERE id=$1`, tID)
if err != nil {
return false, fmt.Errorf("could not disown failed task: %v %v", tID, err)
}
}
}
_, err = tx.Exec(`INSERT INTO harmony_task_history
(task_id, name, posted, work_start, work_end, result, completed_by_host_and_port, err)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, tID, h.Name, postedTime.UTC(), workStart.UTC(), workEnd.UTC(), done, h.TaskEngine.hostAndPort, result)
if err != nil {
return false, fmt.Errorf("could not write history: %w", err)
}
return true, nil
})
if err != nil {
if harmonydb.IsErrSerialization(err) {
time.Sleep(retryWait)
retryWait *= 2
goto retryRecordCompletion
}
log.Error("Could not record transaction: ", err)
return
}
if !cm {
log.Error("Committing the task records failed")
}
}
func (h *taskTypeHandler) AssertMachineHasCapacity() error {
r := h.TaskEngine.ResourcesAvailable()
if r.Cpu-h.Cost.Cpu < 0 {
return errors.New("Did not accept " + h.Name + " task: out of cpu")
}
if h.Cost.Ram > r.Ram {
return errors.New("Did not accept " + h.Name + " task: out of RAM")
}
if r.Gpu-h.Cost.Gpu < 0 {
return errors.New("Did not accept " + h.Name + " task: out of available GPU")
}
if h.TaskTypeDetails.Cost.Storage != nil {
if !h.TaskTypeDetails.Cost.Storage.HasCapacity() {
return errors.New("Did not accept " + h.Name + " task: out of available Storage")
}
}
return nil
}

View File

@ -1,34 +0,0 @@
//go:build !darwin
// +build !darwin
package resources
import (
"os"
"strconv"
"strings"
ffi "github.com/filecoin-project/filecoin-ffi"
)
func getGPUDevices() float64 { // GPU boolean
if nstr := os.Getenv("HARMONY_OVERRIDE_GPUS"); nstr != "" {
n, err := strconv.ParseFloat(nstr, 64)
if err != nil {
logger.Errorf("parsing HARMONY_OVERRIDE_GPUS failed: %+v", err)
} else {
return n
}
}
gpus, err := ffi.GetGPUDevices()
logger.Infow("GPUs", "list", gpus)
if err != nil {
logger.Errorf("getting gpu devices failed: %+v", err)
}
all := strings.ToLower(strings.Join(gpus, ","))
if len(gpus) > 1 || strings.Contains(all, "ati") || strings.Contains(all, "nvidia") {
return float64(len(gpus))
}
return 0
}

View File

@ -1,8 +0,0 @@
//go:build darwin
// +build darwin
package resources
func getGPUDevices() float64 {
return 1.0 // likely-true value intended for non-production use.
}

View File

@ -1,22 +0,0 @@
//go:build darwin || freebsd || openbsd || dragonfly || netbsd
// +build darwin freebsd openbsd dragonfly netbsd
package resources
import (
"encoding/binary"
"syscall"
)
func sysctlUint64(name string) (uint64, error) {
s, err := syscall.Sysctl(name)
if err != nil {
return 0, err
}
// hack because the string conversion above drops a \0
b := []byte(s)
if len(b) < 8 {
b = append(b, 0)
}
return binary.LittleEndian.Uint64(b), nil
}

View File

@ -1,17 +0,0 @@
#ifndef CL_H
#define CL_H
#define CL_USE_DEPRECATED_OPENCL_1_1_APIS
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#define CL_USE_DEPRECATED_OPENCL_2_0_APIS
#define CL_TARGET_OPENCL_VERSION 300
#ifdef __APPLE__
#include "OpenCL/opencl.h"
#else
#include "CL/opencl.h"
#endif
#endif /* CL_H */

View File

@ -1,93 +0,0 @@
// Package cl was borrowed from the go-opencl library which is more complex and
// doesn't compile well for our needs.
package cl
// #include "cl.h"
import "C"
import (
"fmt"
"unsafe"
)
const maxPlatforms = 32
type Platform struct {
id C.cl_platform_id
}
// Obtain the list of platforms available.
func GetPlatforms() ([]*Platform, error) {
var platformIds [maxPlatforms]C.cl_platform_id
var nPlatforms C.cl_uint
err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms)
if err == -1001 { // No platforms found
return nil, nil
}
if err != C.CL_SUCCESS {
return nil, toError(err)
}
platforms := make([]*Platform, nPlatforms)
for i := 0; i < int(nPlatforms); i++ {
platforms[i] = &Platform{id: platformIds[i]}
}
return platforms, nil
}
const maxDeviceCount = 64
type DeviceType uint
const (
DeviceTypeAll DeviceType = C.CL_DEVICE_TYPE_ALL
)
type Device struct {
id C.cl_device_id
}
func (p *Platform) GetAllDevices() ([]*Device, error) {
var deviceIds [maxDeviceCount]C.cl_device_id
var numDevices C.cl_uint
var platformId C.cl_platform_id
if p != nil {
platformId = p.id
}
if err := C.clGetDeviceIDs(platformId, C.cl_device_type(DeviceTypeAll), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS {
return nil, toError(err)
}
if numDevices > maxDeviceCount {
numDevices = maxDeviceCount
}
devices := make([]*Device, numDevices)
for i := 0; i < int(numDevices); i++ {
devices[i] = &Device{id: deviceIds[i]}
}
return devices, nil
}
func toError(code C.cl_int) error {
return ErrOther(code)
}
type ErrOther int
func (e ErrOther) Error() string {
return fmt.Sprintf("OpenCL: error %d", int(e))
}
// Size of global device memory in bytes.
func (d *Device) GlobalMemSize() int64 {
val, _ := d.getInfoUlong(C.CL_DEVICE_GLOBAL_MEM_SIZE, true)
return val
}
func (d *Device) getInfoUlong(param C.cl_device_info, panicOnError bool) (int64, error) {
var val C.cl_ulong
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
if panicOnError {
panic("Should never fail")
}
return 0, toError(err)
}
return int64(val), nil
}
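// A usage sketch (same package, hypothetical caller): sum the global memory
// reported by every OpenCL device on every platform.
func totalDeviceMemory() (int64, error) {
	platforms, err := GetPlatforms()
	if err != nil {
		return 0, err
	}
	var total int64
	for _, p := range platforms {
		devices, err := p.GetAllDevices()
		if err != nil {
			return 0, err
		}
		for _, d := range devices {
			total += d.GlobalMemSize()
		}
	}
	return total, nil
}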

View File

@ -1,161 +0,0 @@
package resources
import (
"bytes"
"context"
"os/exec"
"regexp"
"runtime"
"sync/atomic"
"time"
"github.com/elastic/go-sysinfo"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/sys/unix"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
var LOOKS_DEAD_TIMEOUT = 10 * time.Minute // Time w/o minute heartbeats
type Resources struct {
Cpu int
Gpu float64
Ram uint64
MachineID int
Storage
}
// Optional Storage management.
type Storage interface {
HasCapacity() bool
// This allows some other system to claim space for this task. Returns a cleanup function
Claim(taskID int) (func() error, error)
}
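// A sketch (illustrative only, not concurrency-safe) of a Storage
// implementation that tracks a fixed byte budget per claiming task.
type fixedBudget struct {
	free    int64
	perTask int64
}

func (f *fixedBudget) HasCapacity() bool { return f.free >= f.perTask }

func (f *fixedBudget) Claim(taskID int) (func() error, error) {
	if f.free < f.perTask {
		return nil, xerrors.Errorf("no space left for task %d", taskID)
	}
	f.free -= f.perTask
	// The returned cleanup function releases the claim when the task finishes.
	return func() error {
		f.free += f.perTask
		return nil
	}, nil
}

var _ Storage = &fixedBudget{}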
type Reg struct {
Resources
shutdown atomic.Bool
}
var logger = logging.Logger("harmonytask")
var lotusRE = regexp.MustCompile("lotus-worker|lotus-harmony|yugabyted|yb-master|yb-tserver")
func Register(db *harmonydb.DB, hostnameAndPort string) (*Reg, error) {
var reg Reg
var err error
reg.Resources, err = getResources()
if err != nil {
return nil, err
}
ctx := context.Background()
{ // Learn our owner_id while updating harmony_machines
var ownerID *int
// Upsert query with last_contact update, fetch the machine ID
// (note this isn't a simple insert .. on conflict because host_and_port isn't unique)
err := db.QueryRow(ctx, `
WITH upsert AS (
UPDATE harmony_machines
SET cpu = $2, ram = $3, gpu = $4, last_contact = CURRENT_TIMESTAMP
WHERE host_and_port = $1
RETURNING id
),
inserted AS (
INSERT INTO harmony_machines (host_and_port, cpu, ram, gpu, last_contact)
SELECT $1, $2, $3, $4, CURRENT_TIMESTAMP
WHERE NOT EXISTS (SELECT id FROM upsert)
RETURNING id
)
SELECT id FROM upsert
UNION ALL
SELECT id FROM inserted;
`, hostnameAndPort, reg.Cpu, reg.Ram, reg.Gpu).Scan(&ownerID)
if err != nil {
return nil, xerrors.Errorf("inserting machine entry: %w", err)
}
if ownerID == nil {
return nil, xerrors.Errorf("no owner id")
}
reg.MachineID = *ownerID
cleaned := CleanupMachines(context.Background(), db)
logger.Infow("Cleaned up machines", "count", cleaned)
}
go func() {
for {
time.Sleep(time.Minute)
if reg.shutdown.Load() {
return
}
_, err := db.Exec(ctx, `UPDATE harmony_machines SET last_contact=CURRENT_TIMESTAMP where id=$1`, reg.MachineID)
if err != nil {
logger.Error("Cannot keepalive ", err)
}
}
}()
return &reg, nil
}
func CleanupMachines(ctx context.Context, db *harmonydb.DB) int {
ct, err := db.Exec(ctx,
`DELETE FROM harmony_machines WHERE last_contact < CURRENT_TIMESTAMP - INTERVAL '1 MILLISECOND' * $1 `,
LOOKS_DEAD_TIMEOUT.Milliseconds()) // ms enables unit testing to change timeout.
if err != nil {
logger.Warn("unable to delete old machines: ", err)
}
return ct
}
func (res *Reg) Shutdown() {
res.shutdown.Store(true)
}
func getResources() (res Resources, err error) {
b, err := exec.Command(`ps`, `-ef`).CombinedOutput()
if err != nil {
logger.Warn("Could not safety check for 2+ processes: ", err)
} else {
found := 0
for _, b := range bytes.Split(b, []byte("\n")) {
if lotusRE.Match(b) {
found++
}
}
if found > 1 {
logger.Warn("curio's defaults are for running alone. Use task maximums or CGroups.")
}
}
h, err := sysinfo.Host()
if err != nil {
return Resources{}, err
}
mem, err := h.Memory()
if err != nil {
return Resources{}, err
}
res = Resources{
Cpu: runtime.NumCPU(),
Ram: mem.Available,
Gpu: getGPUDevices(),
}
return res, nil
}
func DiskFree(path string) (uint64, error) {
s := unix.Statfs_t{}
err := unix.Statfs(path, &s)
if err != nil {
return 0, err
}
return s.Bfree * uint64(s.Bsize), nil
}

View File

@ -1,19 +0,0 @@
package taskhelp
// SliceIfFound returns a subset of the slice for which the predicate is true.
// It does not allocate memory, but rearranges the list in place.
// A non-empty input will always return a non-empty list.
// The return value is the subset and a boolean indicating whether the subset was sliced.
func SliceIfFound[T any](slice []T, f func(T) bool) ([]T, bool) {
ct := 0
for i, v := range slice {
if f(v) {
slice[ct], slice[i] = slice[i], slice[ct]
ct++
}
}
if ct == 0 {
return slice, false
}
return slice[:ct], true
}
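// A usage sketch in the same package: keep only the even numbers,
// rearranging the input slice in place rather than copying it.
func exampleSliceIfFound() {
	nums := []int{1, 2, 3, 4}
	evens, found := SliceIfFound(nums, func(n int) bool { return n%2 == 0 })
	_ = found // true
	_ = evens // [2 4]; nums itself has been reordered
}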

View File

@ -1,399 +0,0 @@
package itests
import (
"context"
"encoding/base64"
"flag"
"fmt"
"net"
"os"
"path"
"testing"
"time"
"github.com/docker/go-units"
"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
logging "github.com/ipfs/go-log/v2"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
miner2 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cli/spcli"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/rpc"
"github.com/filecoin-project/lotus/curiosrc/cmd/curio/tasks"
"github.com/filecoin-project/lotus/curiosrc/deps"
"github.com/filecoin-project/lotus/curiosrc/ffiselect"
"github.com/filecoin-project/lotus/curiosrc/market/lmrpc"
"github.com/filecoin-project/lotus/curiosrc/seal"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func TestCurioNewActor(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
full, miner, esemble := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.MockProofs(),
kit.WithSectorIndexDB(),
)
esemble.Start()
blockTime := 100 * time.Millisecond
esemble.BeginMiningMustPost(blockTime)
db := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
var titles []string
err := db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
require.NoError(t, err)
require.NotEmpty(t, titles)
require.NotContains(t, titles, "base")
addr := miner.OwnerKey.Address
sectorSizeInt, err := units.RAMInBytes("8MiB")
require.NoError(t, err)
maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0)
require.NoError(t, err)
err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, "FULL NODE API STRING")
require.NoError(t, err)
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
require.NoError(t, err)
require.Contains(t, titles, "base")
baseCfg := config.DefaultCurioConfig()
var baseText string
err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
require.NoError(t, err)
_, err = deps.LoadConfigWithUpgrades(baseText, baseCfg)
require.NoError(t, err)
require.NotNil(t, baseCfg.Addresses)
require.GreaterOrEqual(t, len(baseCfg.Addresses), 1)
require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String())
}
func TestCurioHappyPath(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
full, miner, esemble := kit.EnsembleMinimal(t,
kit.LatestActorsAt(-1),
kit.WithSectorIndexDB(),
kit.PresealSectors(32),
kit.ThroughRPC(),
)
esemble.Start()
blockTime := 100 * time.Millisecond
esemble.BeginMining(blockTime)
full.WaitTillChain(ctx, kit.HeightAtLeast(15))
err := miner.LogSetLevel(ctx, "*", "ERROR")
require.NoError(t, err)
err = full.LogSetLevel(ctx, "*", "ERROR")
require.NoError(t, err)
db := miner.BaseAPI.(*impl.StorageMinerAPI).HarmonyDB
token, err := full.AuthNew(ctx, api.AllPermissions)
require.NoError(t, err)
fapi := fmt.Sprintf("%s:%s", string(token), full.ListenAddr)
var titles []string
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
require.NoError(t, err)
require.NotEmpty(t, titles)
require.NotContains(t, titles, "base")
addr := miner.OwnerKey.Address
sectorSizeInt, err := units.RAMInBytes("2KiB")
require.NoError(t, err)
maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0)
require.NoError(t, err)
err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, fapi)
require.NoError(t, err)
err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`)
require.NoError(t, err)
require.Contains(t, titles, "base")
baseCfg := config.DefaultCurioConfig()
var baseText string
err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText)
require.NoError(t, err)
_, err = deps.LoadConfigWithUpgrades(baseText, baseCfg)
require.NoError(t, err)
require.NotNil(t, baseCfg.Addresses)
require.GreaterOrEqual(t, len(baseCfg.Addresses), 1)
require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String())
temp := os.TempDir()
dir, err := os.MkdirTemp(temp, "curio")
require.NoError(t, err)
defer func() {
_ = os.Remove(dir)
}()
capi, enginerTerm, closure, finishCh := ConstructCurioTest(ctx, t, dir, db, full, maddr, baseCfg)
defer enginerTerm()
defer closure()
mid, err := address.IDFromAddress(maddr)
require.NoError(t, err)
mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
nv, err := full.StateNetworkVersion(ctx, types.EmptyTSK)
require.NoError(t, err)
wpt := mi.WindowPoStProofType
spt, err := miner2.PreferredSealProofTypeFromWindowPoStType(nv, wpt, false)
require.NoError(t, err)
num, err := seal.AllocateSectorNumbers(ctx, full, db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
for _, n := range numbers {
_, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt)
if err != nil {
return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err)
}
}
return true, nil
})
require.NoError(t, err)
require.Len(t, num, 1)
// TODO: add DDO deal, f05 deal 2 MiB each in the sector
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
}
require.Eventuallyf(t, func() bool {
h, err := full.ChainHead(ctx)
require.NoError(t, err)
t.Logf("head: %d", h.Height())
err = db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number
FROM sectors_sdr_pipeline
WHERE after_commit_msg_success = True`)
require.NoError(t, err)
return len(sectorParamsArr) == 1
}, 10*time.Minute, 1*time.Second, "sector did not finish sealing in 10 minutes")
require.Equal(t, sectorParamsArr[0].SectorNumber, int64(0))
require.Equal(t, sectorParamsArr[0].SpID, int64(mid))
_ = capi.Shutdown(ctx)
<-finishCh
}
func createCliContext(dir string) (*cli.Context, error) {
// Define flags for the command
flags := []cli.Flag{
&cli.StringFlag{
Name: "listen",
Usage: "host address and port the worker api will listen on",
Value: "0.0.0.0:12300",
EnvVars: []string{"LOTUS_WORKER_LISTEN"},
},
&cli.BoolFlag{
Name: "nosync",
Usage: "don't check full-node sync status",
},
&cli.BoolFlag{
Name: "halt-after-init",
Usage: "only run init, then return",
Hidden: true,
},
&cli.BoolFlag{
Name: "manage-fdlimit",
Usage: "manage open file limit",
Value: true,
},
&cli.StringFlag{
Name: "storage-json",
Usage: "path to json file containing storage config",
Value: "~/.curio/storage.json",
},
&cli.StringFlag{
Name: "journal",
Usage: "path to journal files",
Value: "~/.curio/",
},
&cli.StringSliceFlag{
Name: "layers",
Aliases: []string{"l", "layer"},
Usage: "list of layers to be interpreted (atop defaults)",
},
}
// Set up the command with flags
command := &cli.Command{
Name: "simulate",
Flags: flags,
Action: func(c *cli.Context) error {
fmt.Println("Listen address:", c.String("listen"))
fmt.Println("No-sync:", c.Bool("nosync"))
fmt.Println("Halt after init:", c.Bool("halt-after-init"))
fmt.Println("Manage file limit:", c.Bool("manage-fdlimit"))
fmt.Println("Storage config path:", c.String("storage-json"))
fmt.Println("Journal path:", c.String("journal"))
fmt.Println("Layers:", c.StringSlice("layers"))
return nil
},
}
// Create a FlagSet and populate it
set := flag.NewFlagSet("test", flag.ContinueOnError)
for _, f := range flags {
if err := f.Apply(set); err != nil {
return nil, xerrors.Errorf("Error applying flag: %s\n", err)
}
}
curioDir := path.Join(dir, "curio")
cflag := fmt.Sprintf("--storage-json=%s", curioDir)
storage := path.Join(dir, "storage.json")
sflag := fmt.Sprintf("--journal=%s", storage)
// Parse the flags with test values
err := set.Parse([]string{"--listen=0.0.0.0:12345", "--nosync", "--manage-fdlimit", sflag, cflag, "--layers=seal"})
if err != nil {
return nil, xerrors.Errorf("Error setting flag: %s\n", err)
}
// Create a cli.Context from the FlagSet
app := cli.NewApp()
ctx := cli.NewContext(app, set, nil)
ctx.Command = command
return ctx, nil
}
func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmonydb.DB, full v1api.FullNode, maddr address.Address, cfg *config.CurioConfig) (api.Curio, func(), jsonrpc.ClientCloser, <-chan struct{}) {
ffiselect.IsTest = true
cctx, err := createCliContext(dir)
require.NoError(t, err)
shutdownChan := make(chan struct{})
{
var ctxclose func()
ctx, ctxclose = context.WithCancel(ctx)
go func() {
<-shutdownChan
ctxclose()
}()
}
dependencies := &deps.Deps{}
dependencies.DB = db
dependencies.Full = full
seal.SetDevnet(true)
err = os.Setenv("CURIO_REPO_PATH", dir)
require.NoError(t, err)
err = dependencies.PopulateRemainingDeps(ctx, cctx, false)
require.NoError(t, err)
taskEngine, err := tasks.StartTasks(ctx, dependencies)
require.NoError(t, err)
dependencies.Cfg.Subsystems.BoostAdapters = []string{fmt.Sprintf("%s:127.0.0.1:32000", maddr)}
err = lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Full, dependencies.Cfg)
require.NoError(t, err)
go func() {
err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown.
require.NoError(t, err)
}()
finishCh := node.MonitorShutdown(shutdownChan)
var machines []string
err = db.Select(ctx, &machines, `select host_and_port from harmony_machines`)
require.NoError(t, err)
require.Len(t, machines, 1)
laddr, err := net.ResolveTCPAddr("tcp", machines[0])
require.NoError(t, err)
ma, err := manet.FromNetAddr(laddr)
require.NoError(t, err)
var apiToken []byte
{
type jwtPayload struct {
Allow []auth.Permission
}
p := jwtPayload{
Allow: api.AllPermissions,
}
sk, err := base64.StdEncoding.DecodeString(cfg.Apis.StorageRPCSecret)
require.NoError(t, err)
apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk))
require.NoError(t, err)
}
ctoken := fmt.Sprintf("%s:%s", string(apiToken), ma)
err = os.Setenv("CURIO_API_INFO", ctoken)
require.NoError(t, err)
capi, ccloser, err := rpc.GetCurioAPI(&cli.Context{})
require.NoError(t, err)
scfg := storiface.LocalStorageMeta{
ID: storiface.ID(uuid.New().String()),
Weight: 10,
CanSeal: true,
CanStore: true,
MaxStorage: 0,
Groups: []string{},
AllowTo: []string{},
}
err = capi.StorageInit(ctx, dir, scfg)
require.NoError(t, err)
err = capi.StorageAddLocal(ctx, dir)
require.NoError(t, err)
_ = logging.SetLogLevel("harmonytask", "DEBUG")
return capi, taskEngine.GracefullyTerminate, ccloser, finishCh
}

View File

@ -1,28 +0,0 @@
package passcall
import (
"sync"
"time"
)
// Every is a helper function that will call the provided callback
// function at most once every `passInterval` duration. If the function is called
// more frequently than that, it will return the zero value and not call the callback.
func Every[P, R any](passInterval time.Duration, cb func(P) R) func(P) R {
var lastCall time.Time
var lk sync.Mutex
return func(param P) R {
lk.Lock()
defer lk.Unlock()
if time.Since(lastCall) < passInterval {
return *new(R)
}
defer func() {
lastCall = time.Now()
}()
return cb(param)
}
}
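// A usage sketch in the same package: rate-limit a poll callback so that at
// most one call per minute actually runs; the rest return the zero value.
func examplePoll() {
	poll := Every(time.Minute, func(tag string) error {
		// ... do the periodic work for `tag` here ...
		return nil
	})
	_ = poll("first")  // runs the callback (first call)
	_ = poll("second") // within a minute of the last run, so skipped; returns nil
}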

View File

@ -1,547 +0,0 @@
package market
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-padreader"
"github.com/filecoin-project/go-state-types/abi"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/seal"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
)
var log = logging.Logger("piece-ingestor")
const loopFrequency = 10 * time.Second
type Ingester interface {
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error)
}
type PieceIngesterApi interface {
ChainHead(context.Context) (*types.TipSet, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
StateMinerAllocated(ctx context.Context, a address.Address, key types.TipSetKey) (*bitfield.BitField, error)
StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error)
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
}
type openSector struct {
number abi.SectorNumber
currentSize abi.PaddedPieceSize
earliestStartEpoch abi.ChainEpoch
index uint64
openedAt *time.Time
latestEndEpoch abi.ChainEpoch
}
type PieceIngester struct {
ctx context.Context
db *harmonydb.DB
api PieceIngesterApi
miner address.Address
mid uint64 // miner ID
windowPoStProofType abi.RegisteredPoStProof
synth bool
sectorSize abi.SectorSize
sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method
maxWaitTime time.Duration
}
type pieceDetails struct {
Sector abi.SectorNumber `db:"sector_number"`
Size abi.PaddedPieceSize `db:"piece_size"`
StartEpoch abi.ChainEpoch `db:"deal_start_epoch"`
EndEpoch abi.ChainEpoch `db:"deal_end_epoch"`
Index uint64 `db:"piece_index"`
CreatedAt *time.Time `db:"created_at"`
}
type verifiedDeal struct {
isVerified bool
tmin abi.ChainEpoch
tmax abi.ChainEpoch
}
func NewPieceIngester(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, maddr address.Address, sealRightNow bool, maxWaitTime time.Duration) (*PieceIngester, error) {
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return nil, err
}
mid, err := address.IDFromAddress(maddr)
if err != nil {
return nil, xerrors.Errorf("getting miner ID: %w", err)
}
pi := &PieceIngester{
ctx: ctx,
db: db,
api: api,
sealRightNow: sealRightNow,
miner: maddr,
maxWaitTime: maxWaitTime,
sectorSize: mi.SectorSize,
windowPoStProofType: mi.WindowPoStProofType,
mid: mid,
synth: false, // TODO: synthetic porep config
}
go pi.start()
return pi, nil
}
func (p *PieceIngester) start() {
ticker := time.NewTicker(loopFrequency)
defer ticker.Stop()
for {
select {
case <-p.ctx.Done():
return
case <-ticker.C:
err := p.Seal()
if err != nil {
log.Error(err)
}
}
}
}
func (p *PieceIngester) Seal() error {
head, err := p.api.ChainHead(p.ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
spt, err := p.getSealProofType()
if err != nil {
return xerrors.Errorf("getting seal proof type: %w", err)
}
shouldSeal := func(sector *openSector) bool {
// Start sealing a sector if
// 1. The sector is full
// 2. We have been waiting for maxWaitTime
// 3. The earliest deal StartEpoch is less than 8 hours away // todo: make this config?
if sector.currentSize == abi.PaddedPieceSize(p.sectorSize) {
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "sector full")
return true
}
if time.Since(*sector.openedAt) > p.maxWaitTime {
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "MaxWaitTime reached")
return true
}
if sector.earliestStartEpoch < head.Height()+abi.ChainEpoch(960) {
log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "earliest start epoch")
return true
}
return false
}
comm, err := p.db.BeginTransaction(p.ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
openSectors, err := p.getOpenSectors(tx)
if err != nil {
return false, err
}
for _, sector := range openSectors {
sector := sector
if shouldSeal(sector) {
// Start sealing the sector
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector.number, spt)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
if cn != 1 {
return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned")
}
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector.number)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
}
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return xerrors.Errorf("start sealing sector: %w", err)
}
if !comm {
return xerrors.Errorf("start sealing sector: commit failed")
}
return nil
}
func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
if maddr != p.miner {
return api.SectorOffset{}, xerrors.Errorf("miner address doesn't match")
}
// check raw size
if piece.Size() != padreader.PaddedSize(uint64(rawSize)).Padded() {
return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size")
}
var propJson []byte
dataHdrJson, err := json.Marshal(header)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(header): %w", err)
}
vd := verifiedDeal{
isVerified: false,
}
if piece.DealProposal != nil {
vd.isVerified = piece.DealProposal.VerifiedDeal
if vd.isVerified {
alloc, err := p.api.StateGetAllocationForPendingDeal(ctx, piece.DealID, types.EmptyTSK)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting pending allocation for deal %d: %w", piece.DealID, err)
}
if alloc == nil {
return api.SectorOffset{}, xerrors.Errorf("no allocation found for deal %d: %w", piece.DealID, err)
}
vd.tmin = alloc.TermMin
vd.tmax = alloc.TermMax
}
propJson, err = json.Marshal(piece.DealProposal)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err)
}
} else {
vd.isVerified = piece.PieceActivationManifest.VerifiedAllocationKey != nil
if vd.isVerified {
client, err := address.NewIDAddress(uint64(piece.PieceActivationManifest.VerifiedAllocationKey.Client))
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting client address from actor ID: %w", err)
}
alloc, err := p.api.StateGetAllocation(ctx, client, verifregtypes.AllocationId(piece.PieceActivationManifest.VerifiedAllocationKey.ID), types.EmptyTSK)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting allocation details for %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err)
}
if alloc == nil {
return api.SectorOffset{}, xerrors.Errorf("no allocation found for ID %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err)
}
vd.tmin = alloc.TermMin
vd.tmax = alloc.TermMax
}
propJson, err = json.Marshal(piece.PieceActivationManifest)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.PieceActivationManifest): %w", err)
}
}
if !p.sealRightNow {
// Try to allocate the piece to an open sector
allocated, ret, err := p.allocateToExisting(ctx, piece, rawSize, source, dataHdrJson, propJson, vd)
if err != nil {
return api.SectorOffset{}, err
}
if allocated {
return ret, nil
}
}
// The piece was not placed in an open sector (or sealRightNow is set), so create a new sector and add the piece to it
num, err := seal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) {
if len(numbers) != 1 {
return false, xerrors.Errorf("expected one sector number")
}
n := numbers[0]
if piece.DealProposal != nil {
_, err = tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
p.mid, n, 0,
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
if err != nil {
return false, xerrors.Errorf("adding deal to sector: %w", err)
}
} else {
_, err = tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`,
p.mid, n, 0,
piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size,
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson)
if err != nil {
return false, xerrors.Errorf("adding deal to sector: %w", err)
}
}
return true, nil
})
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err)
}
if len(num) != 1 {
return api.SectorOffset{}, xerrors.Errorf("expected one sector number")
}
if p.sealRightNow {
err = p.SectorStartSealing(ctx, num[0])
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("SectorStartSealing: %w", err)
}
}
return api.SectorOffset{
Sector: num[0],
Offset: 0,
}, nil
}
func (p *PieceIngester) allocateToExisting(ctx context.Context, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) {
var ret api.SectorOffset
var allocated bool
var rerr error
comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
openSectors, err := p.getOpenSectors(tx)
if err != nil {
return false, err
}
pieceSize := piece.Size()
for _, sec := range openSectors {
sec := sec
if sec.currentSize+pieceSize <= abi.PaddedPieceSize(p.sectorSize) {
if vd.isVerified {
sectorLifeTime := sec.latestEndEpoch - sec.earliestStartEpoch
// The allocation's TermMin must fit within the sector lifetime, and TermMax must be at least the sector lifetime
// Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086
if sectorLifeTime < vd.tmin || sectorLifeTime > vd.tmax {
continue
}
}
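// Illustrative example (not from the original): with an allocation TermMin of ~180 days and TermMax of
// ~540 days, a sector whose lifetime works out to ~300 days can take the verified piece, while a
// ~100-day sector would have been skipped above.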
ret.Sector = sec.number
ret.Offset = sec.currentSize
// Insert market deal to DB for the sector
if piece.DealProposal != nil {
cn, err := tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
p.mid, sec.number, sec.index+1,
piece.DealProposal.PieceCID, piece.DealProposal.PieceSize,
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch)
if err != nil {
return false, fmt.Errorf("adding deal to sector: %v", err)
}
if cn != 1 {
return false, xerrors.Errorf("expected one piece")
}
} else { // Insert DDO deal to DB for the sector
cn, err := tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`,
p.mid, sec.number, sec.index+1,
piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size,
source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed,
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson)
if err != nil {
return false, fmt.Errorf("adding deal to sector: %v", err)
}
if cn != 1 {
return false, xerrors.Errorf("expected one piece")
}
}
allocated = true
break
}
}
return true, nil
}, harmonydb.OptionRetry())
if !comm {
rerr = xerrors.Errorf("allocating piece to a sector: commit failed")
}
if err != nil {
rerr = xerrors.Errorf("allocating piece to a sector: %w", err)
}
return allocated, ret, rerr
}
func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.SectorNumber) error {
spt, err := p.getSealProofType()
if err != nil {
return xerrors.Errorf("getting seal proof type: %w", err)
}
comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
// Get current open sector pieces from DB
var pieces []pieceDetails
err = tx.Select(&pieces, `
SELECT
sector_number,
piece_size,
piece_index,
COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch,
COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch,
created_at
FROM
open_sector_pieces
WHERE
sp_id = $1 AND sector_number = $2
ORDER BY
piece_index DESC;`, p.mid, sector)
if err != nil {
return false, xerrors.Errorf("getting open sectors from DB")
}
if len(pieces) < 1 {
return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector)
}
cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector, spt)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
if cn != 1 {
return false, xerrors.Errorf("incorrect number of rows returned")
}
_, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector)
if err != nil {
return false, xerrors.Errorf("adding sector to pipeline: %w", err)
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return xerrors.Errorf("start sealing sector: %w", err)
}
if !comm {
return xerrors.Errorf("start sealing sector: commit failed")
}
return nil
}
func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) {
// Get current open sector pieces from DB
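// (Added note) direct_* columns are filled for DDO pieces and f05_* columns for legacy market deals;
// the COALESCE below picks whichever is present so both kinds of pieces share one start/end schedule.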
var pieces []pieceDetails
err := tx.Select(&pieces, `
SELECT
sector_number,
piece_size,
piece_index,
COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch,
COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch,
created_at
FROM
open_sector_pieces
WHERE
sp_id = $1
ORDER BY
piece_index DESC;`, p.mid)
if err != nil {
return nil, xerrors.Errorf("getting open sectors from DB")
}
getStartEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch {
if cur > 0 && cur < new {
return cur
}
return new
}
getEndEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch {
if cur > 0 && cur > new {
return cur
}
return new
}
getOpenedAt := func(piece pieceDetails, cur *time.Time) *time.Time {
if piece.CreatedAt.Before(*cur) {
return piece.CreatedAt
}
return cur
}
sectorMap := map[abi.SectorNumber]*openSector{}
for _, pi := range pieces {
pi := pi
sector, ok := sectorMap[pi.Sector]
if !ok {
sectorMap[pi.Sector] = &openSector{
number: pi.Sector,
currentSize: pi.Size,
earliestStartEpoch: getStartEpoch(pi.StartEpoch, 0),
index: pi.Index,
openedAt: pi.CreatedAt,
latestEndEpoch: getEndEpoch(pi.EndEpoch, 0),
}
continue
}
sector.currentSize += pi.Size
sector.earliestStartEpoch = getStartEpoch(pi.StartEpoch, sector.earliestStartEpoch)
sector.latestEndEpoch = getEndEpoch(pi.EndEpoch, sector.latestEndEpoch)
if sector.index < pi.Index {
sector.index = pi.Index
}
sector.openedAt = getOpenedAt(pi, sector.openedAt)
}
var os []*openSector
for _, v := range sectorMap {
v := v
os = append(os, v)
}
return os, nil
}
func (p *PieceIngester) getSealProofType() (abi.RegisteredSealProof, error) {
nv, err := p.api.StateNetworkVersion(p.ctx, types.EmptyTSK)
if err != nil {
return 0, xerrors.Errorf("getting network version: %w", err)
}
return miner.PreferredSealProofTypeFromWindowPoStType(nv, p.windowPoStProofType, p.synth)
}

View File

@ -1,33 +0,0 @@
package fakelm
import (
"context"
"github.com/google/uuid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
// MinimalLMApi is a subset of the LotusMiner API that is exposed by Curio
// for consumption by boost
type MinimalLMApi interface {
ActorAddress(context.Context) (address.Address, error)
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error)
SectorsList(context.Context) ([]abi.SectorNumber, error)
SectorsSummary(ctx context.Context) (map[api.SectorState]int, error)
SectorsListInStates(context.Context, []api.SectorState) ([]abi.SectorNumber, error)
StorageRedeclareLocal(context.Context, *storiface.ID, bool) error
ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error)
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error)
}

View File

@ -1,381 +0,0 @@
package fakelm
import (
"context"
"encoding/base64"
"net/http"
"net/url"
"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/curiosrc/market"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/storage/paths"
sealing "github.com/filecoin-project/lotus/storage/pipeline"
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type LMRPCProvider struct {
si paths.SectorIndex
full api.FullNode
maddr address.Address // lotus-miner RPC is single-actor
minerID abi.ActorID
ssize abi.SectorSize
pi market.Ingester
db *harmonydb.DB
conf *config.CurioConfig
}
func NewLMRPCProvider(si paths.SectorIndex, full api.FullNode, maddr address.Address, minerID abi.ActorID, ssize abi.SectorSize, pi market.Ingester, db *harmonydb.DB, conf *config.CurioConfig) *LMRPCProvider {
return &LMRPCProvider{
si: si,
full: full,
maddr: maddr,
minerID: minerID,
ssize: ssize,
pi: pi,
db: db,
conf: conf,
}
}
func (l *LMRPCProvider) ActorAddress(ctx context.Context) (address.Address, error) {
return l.maddr, nil
}
func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) {
// correct enough
return map[uuid.UUID][]storiface.WorkerJob{}, nil
}
func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
var ssip []struct {
PieceCID *string `db:"piece_cid"`
DealID *int64 `db:"f05_deal_id"`
Complete bool `db:"after_commit_msg_success"`
Failed bool `db:"failed"`
SDR bool `db:"after_sdr"`
PoRep bool `db:"after_porep"`
}
err := l.db.Select(ctx, &ssip, `
WITH CheckCommit AS (
SELECT
sp_id,
sector_number,
after_commit_msg,
failed,
after_sdr,
after_porep,
after_commit_msg_success
FROM
sectors_sdr_pipeline
WHERE
sp_id = $1 AND sector_number = $2
),
MetaPieces AS (
SELECT
mp.piece_cid,
mp.f05_deal_id,
cc.after_commit_msg_success,
cc.failed,
cc.after_sdr,
cc.after_porep
FROM
sectors_meta_pieces mp
INNER JOIN
CheckCommit cc ON mp.sp_id = cc.sp_id AND mp.sector_num = cc.sector_number
WHERE
cc.after_commit_msg IS TRUE
),
InitialPieces AS (
SELECT
ip.piece_cid,
ip.f05_deal_id,
cc.after_commit_msg_success,
cc.failed,
cc.after_sdr,
cc.after_porep
FROM
sectors_sdr_initial_pieces ip
INNER JOIN
CheckCommit cc ON ip.sp_id = cc.sp_id AND ip.sector_number = cc.sector_number
WHERE
cc.after_commit_msg IS FALSE
),
FallbackPieces AS (
SELECT
op.piece_cid,
op.f05_deal_id,
FALSE as after_commit_msg_success,
FALSE as failed,
FALSE as after_sdr,
FALSE as after_porep
FROM
open_sector_pieces op
WHERE
op.sp_id = $1 AND op.sector_number = $2
AND NOT EXISTS (SELECT 1 FROM sectors_sdr_pipeline sp WHERE sp.sp_id = op.sp_id AND sp.sector_number = op.sector_number)
)
SELECT * FROM MetaPieces
UNION ALL
SELECT * FROM InitialPieces
UNION ALL
SELECT * FROM FallbackPieces;`, l.minerID, sid)
if err != nil {
return api.SectorInfo{}, err
}
var deals []abi.DealID
if len(ssip) > 0 {
for _, d := range ssip {
if d.DealID != nil {
deals = append(deals, abi.DealID(*d.DealID))
}
}
}
spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, false) // good enough, just need this for ssize anyways
if err != nil {
return api.SectorInfo{}, err
}
ret := api.SectorInfo{
SectorID: sid,
CommD: nil,
CommR: nil,
Proof: nil,
Deals: deals,
Pieces: nil,
Ticket: api.SealTicket{},
Seed: api.SealSeed{},
PreCommitMsg: nil,
CommitMsg: nil,
Retries: 0,
ToUpgrade: false,
ReplicaUpdateMessage: nil,
LastErr: "",
Log: nil,
SealProof: spt,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),
VerifiedDealWeight: big.Zero(),
InitialPledge: big.Zero(),
OnTime: 0,
Early: 0,
}
// If no rows were found, the sector doesn't exist in the DB yet
if len(ssip) == 0 {
ret.State = api.SectorState(sealing.UndefinedSectorState)
return ret, nil
}
// assign ssip[0] to a local variable for easier reading
currentSSIP := ssip[0]
switch {
case currentSSIP.Failed:
ret.State = api.SectorState(sealing.FailedUnrecoverable)
case !currentSSIP.SDR:
ret.State = api.SectorState(sealing.WaitDeals)
case currentSSIP.SDR && !currentSSIP.PoRep:
ret.State = api.SectorState(sealing.PreCommit1)
case currentSSIP.SDR && currentSSIP.PoRep && !currentSSIP.Complete:
ret.State = api.SectorState(sealing.PreCommit2)
case currentSSIP.Complete:
ret.State = api.SectorState(sealing.Proving)
default:
return api.SectorInfo{}, nil
}
return ret, nil
}
func (l *LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}
var out []abi.SectorNumber
for _, decl := range decls {
for _, s := range decl {
if s.Miner != l.minerID {
continue
}
out = append(out, s.SectorID.Number)
}
}
return out, nil
}
type sectorParts struct {
sealed, unsealed, cache bool
inStorage bool
}
func (l *LMRPCProvider) SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}
states := map[abi.SectorID]sectorParts{}
for si, decll := range decls {
sinfo, err := l.si.StorageInfo(ctx, si)
if err != nil {
return nil, err
}
for _, decl := range decll {
if decl.Miner != l.minerID {
continue
}
state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}]
state.sealed = state.sealed || decl.Has(storiface.FTSealed)
state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed)
state.cache = state.cache || decl.Has(storiface.FTCache)
state.inStorage = state.inStorage || sinfo.CanStore
states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state
}
}
out := map[api.SectorState]int{}
for _, state := range states {
switch {
case state.sealed && state.inStorage:
out[api.SectorState(sealing.Proving)]++
default:
// not even close to correct, but good enough for now
out[api.SectorState(sealing.PreCommit1)]++
}
}
return out, nil
}
func (l *LMRPCProvider) SectorsListInStates(ctx context.Context, want []api.SectorState) ([]abi.SectorNumber, error) {
decls, err := l.si.StorageList(ctx)
if err != nil {
return nil, err
}
wantProving, wantPrecommit1 := false, false
for _, s := range want {
switch s {
case api.SectorState(sealing.Proving):
wantProving = true
case api.SectorState(sealing.PreCommit1):
wantPrecommit1 = true
}
}
states := map[abi.SectorID]sectorParts{}
for si, decll := range decls {
sinfo, err := l.si.StorageInfo(ctx, si)
if err != nil {
return nil, err
}
for _, decl := range decll {
if decl.Miner != l.minerID {
continue
}
state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}]
state.sealed = state.sealed || decl.Has(storiface.FTSealed)
state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed)
state.cache = state.cache || decl.Has(storiface.FTCache)
state.inStorage = state.inStorage || sinfo.CanStore
states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state
}
}
var out []abi.SectorNumber
for id, state := range states {
switch {
case state.sealed && state.inStorage:
if wantProving {
out = append(out, id.Number)
}
default:
// not even close to correct, but good enough for now
if wantPrecommit1 {
out = append(out, id.Number)
}
}
}
return out, nil
}
func (l *LMRPCProvider) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, b bool) error {
// This rescans and redeclares sectors on lotus-miner; it is unclear why boost calls it at all.
return nil
}
func (l *LMRPCProvider) IsUnsealed(ctx context.Context, sectorNum abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
sectorID := abi.SectorID{Miner: l.minerID, Number: sectorNum}
si, err := l.si.StorageFindSector(ctx, sectorID, storiface.FTUnsealed, 0, false)
if err != nil {
return false, err
}
// yes, yes, technically sectors can be partially unsealed, but that is never done in practice
// and can't even be easily done with the current implementation
return len(si) > 0, nil
}
func (l *LMRPCProvider) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) {
return abi.PieceInfo{}, xerrors.Errorf("not supported")
}
func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) {
if d.DealProposal.PieceSize != abi.PaddedPieceSize(l.ssize) {
return api.SectorOffset{}, xerrors.Errorf("only full-sector pieces are supported")
}
return api.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector")
}
func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) {
return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header)
}
func (l *LMRPCProvider) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) {
type jwtPayload struct {
Allow []auth.Permission
}
p := jwtPayload{
Allow: perms,
}
sk, err := base64.StdEncoding.DecodeString(l.conf.Apis.StorageRPCSecret)
if err != nil {
return nil, xerrors.Errorf("decode secret: %w", err)
}
return jwt.Sign(&p, jwt.NewHS256(sk))
}
var _ MinimalLMApi = &LMRPCProvider{}

View File

@ -1,620 +0,0 @@
package lmrpc
import (
"context"
"errors"
"fmt"
"io"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
logging "github.com/ipfs/go-log/v2"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/yugabyte/pgx/v5"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
cumarket "github.com/filecoin-project/lotus/curiosrc/market"
"github.com/filecoin-project/lotus/curiosrc/market/fakelm"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/nullreader"
"github.com/filecoin-project/lotus/metrics/proxy"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/storage/paths"
lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("lmrpc")
const backpressureWaitTime = 30 * time.Second
func ServeCurioMarketRPCFromConfig(db *harmonydb.DB, full api.FullNode, cfg *config.CurioConfig) error {
return forEachMarketRPC(cfg, func(maddr string, listen string) error {
addr, err := address.NewFromString(maddr)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}
go func() {
err := ServeCurioMarketRPC(db, full, addr, cfg, listen)
if err != nil {
log.Errorf("failed to serve market rpc: %s", err)
}
}()
return nil
})
}
func MakeTokens(cfg *config.CurioConfig) (map[address.Address]string, error) {
out := map[address.Address]string{}
err := forEachMarketRPC(cfg, func(smaddr string, listen string) error {
ctx := context.Background()
laddr, err := net.ResolveTCPAddr("tcp", listen)
if err != nil {
return xerrors.Errorf("net resolve: %w", err)
}
if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() {
return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen)
}
// need minimal provider with just the config
lp := fakelm.NewLMRPCProvider(nil, nil, address.Undef, 0, 0, nil, nil, cfg)
tok, err := lp.AuthNew(ctx, api.AllPermissions)
if err != nil {
return err
}
// parse listen into multiaddr
ma, err := manet.FromNetAddr(laddr)
if err != nil {
return xerrors.Errorf("net from addr (%v): %w", laddr, err)
}
maddr, err := address.NewFromString(smaddr)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}
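// The boost-facing token is the JWT followed by the multiaddr of this market RPC endpoint,
// e.g. (illustrative) "eyJhbGci...:/ip4/10.0.0.5/tcp/32100".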
token := fmt.Sprintf("%s:%s", tok, ma)
out[maddr] = token
return nil
})
return out, err
}
func forEachMarketRPC(cfg *config.CurioConfig, cb func(string, string) error) error {
for n, server := range cfg.Subsystems.BoostAdapters {
n := n
// server: [f0.. actor address]:[bind address]
// bind address is either a numeric port or a full address
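// e.g. (illustrative) "f01000:32100" or "f01000:10.0.0.5:32100"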
// first split at first : to get the actor address and the bind address
split := strings.SplitN(server, ":", 2)
// if the split length is not 2, return an error
if len(split) != 2 {
return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server)
}
// get the actor address and the bind address
strMaddr, strListen := split[0], split[1]
maddr, err := address.NewFromString(strMaddr)
if err != nil {
return xerrors.Errorf("parsing actor address: %w", err)
}
// check the listen address
if strListen == "" {
return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server)
}
// if listen address is numeric, prepend the default host
if _, err := strconv.Atoi(strListen); err == nil {
strListen = "0.0.0.0:" + strListen
}
// check if the listen address is a valid address
if _, _, err := net.SplitHostPort(strListen); err != nil {
return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server)
}
log.Infow("Starting market RPC server", "actor", maddr, "listen", strListen)
if err := cb(strMaddr, strListen); err != nil {
return err
}
}
return nil
}
func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Address, conf *config.CurioConfig, listen string) error {
ctx := context.Background()
pin, err := cumarket.NewPieceIngester(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime))
if err != nil {
return xerrors.Errorf("starting piece ingestor")
}
si := paths.NewDBIndex(nil, db)
mid, err := address.IDFromAddress(maddr)
if err != nil {
return xerrors.Errorf("getting miner id: %w", err)
}
mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
lp := fakelm.NewLMRPCProvider(si, full, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, conf)
laddr, err := net.ResolveTCPAddr("tcp", listen)
if err != nil {
return xerrors.Errorf("net resolve: %w", err)
}
if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() {
return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen)
}
rootUrl := url.URL{
Scheme: "http",
Host: laddr.String(),
}
ast := api.StorageMinerStruct{}
ast.CommonStruct.Internal.Version = func(ctx context.Context) (api.APIVersion, error) {
return api.APIVersion{
Version: "curio-proxy-v0",
APIVersion: api.MinerAPIVersion0,
BlockDelay: build.BlockDelaySecs,
}, nil
}
pieceInfoLk := new(sync.Mutex)
pieceInfos := map[uuid.UUID][]pieceInfo{}
ast.CommonStruct.Internal.AuthNew = lp.AuthNew
ast.Internal.ActorAddress = lp.ActorAddress
ast.Internal.WorkerJobs = lp.WorkerJobs
ast.Internal.SectorsStatus = lp.SectorsStatus
ast.Internal.SectorsList = lp.SectorsList
ast.Internal.SectorsSummary = lp.SectorsSummary
ast.Internal.SectorsListInStates = lp.SectorsListInStates
ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal
ast.Internal.ComputeDataCid = lp.ComputeDataCid
ast.Internal.SectorAddPieceToAny = sectorAddPieceToAnyOperation(maddr, rootUrl, conf, pieceInfoLk, pieceInfos, pin, db, mi.SectorSize)
ast.Internal.StorageList = si.StorageList
ast.Internal.StorageDetach = si.StorageDetach
ast.Internal.StorageReportHealth = si.StorageReportHealth
ast.Internal.StorageDeclareSector = si.StorageDeclareSector
ast.Internal.StorageDropSector = si.StorageDropSector
ast.Internal.StorageFindSector = si.StorageFindSector
ast.Internal.StorageInfo = si.StorageInfo
ast.Internal.StorageBestAlloc = si.StorageBestAlloc
ast.Internal.StorageLock = si.StorageLock
ast.Internal.StorageTryLock = si.StorageTryLock
ast.Internal.StorageGetLocks = si.StorageGetLocks
ast.Internal.SectorStartSealing = pin.SectorStartSealing
var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
// /piece?piece_id=xxxx
pieceUUID := r.URL.Query().Get("piece_id")
pu, err := uuid.Parse(pieceUUID)
if err != nil {
http.Error(w, "bad piece id", http.StatusBadRequest)
return
}
if r.Method != http.MethodGet {
http.Error(w, "bad method", http.StatusMethodNotAllowed)
return
}
fmt.Printf("%s request for piece from %s\n", pieceUUID, r.RemoteAddr)
pieceInfoLk.Lock()
pis, ok := pieceInfos[pu]
if !ok {
http.Error(w, "piece not found", http.StatusNotFound)
log.Warnw("piece not found", "piece_uuid", pu)
pieceInfoLk.Unlock()
return
}
// pop
pi := pis[0]
pis = pis[1:]
pieceInfos[pu] = pis
if len(pis) == 0 {
delete(pieceInfos, pu)
}
pieceInfoLk.Unlock()
start := time.Now()
pieceData := io.LimitReader(io.MultiReader(
pi.data,
nullreader.Reader{},
), int64(pi.size))
n, err := io.Copy(w, pieceData)
close(pi.done)
took := time.Since(start)
mbps := float64(n) / (1024 * 1024) / took.Seconds()
if err != nil {
log.Errorf("copying piece data: %s", err)
return
}
log.Infow("piece served", "piece_uuid", pu, "size", float64(n)/(1024*1024), "duration", took, "speed", mbps)
}
finalApi := proxy.LoggingAPI[api.StorageMiner, api.StorageMinerStruct](&ast)
mh, err := node.MinerHandler(finalApi, false) // todo permissioned
if err != nil {
return err
}
mux := http.NewServeMux()
mux.Handle("/piece", pieceHandler)
mux.Handle("/", mh) // todo: create a method for sealNow for sectors
server := &http.Server{
Addr: listen,
Handler: mux,
ReadTimeout: 48 * time.Hour,
WriteTimeout: 48 * time.Hour, // really high because we block until pieces are saved in PiecePark
}
return server.ListenAndServe()
}
type pieceInfo struct {
data storiface.Data
size abi.UnpaddedPieceSize
done chan struct{}
}
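// sectorAddPieceToAnyOperation returns the handler backing the lotus-miner SectorAddPieceToAny RPC.
// It registers the incoming piece data so the park task can fetch it via the local /piece endpoint,
// waits for the piece to be parked, and then allocates the parked piece to a sector through the
// ingester. (Descriptive comment added for clarity; not in the original source.)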
func sectorAddPieceToAnyOperation(maddr address.Address, rootUrl url.URL, conf *config.CurioConfig, pieceInfoLk *sync.Mutex, pieceInfos map[uuid.UUID][]pieceInfo, pin *cumarket.PieceIngester, db *harmonydb.DB, ssize abi.SectorSize) func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) {
return func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) {
if (deal.PieceActivationManifest == nil && deal.DealProposal == nil) || (deal.PieceActivationManifest != nil && deal.DealProposal != nil) {
return api.SectorOffset{}, xerrors.Errorf("deal info must have either deal proposal or piece manifest")
}
origPieceData := pieceData
defer func() {
closer, ok := origPieceData.(io.Closer)
if !ok {
log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
return
}
if err := closer.Close(); err != nil {
log.Warnw("closing pieceData in DataCid", "error", err)
}
}()
pi := pieceInfo{
data: pieceData,
size: pieceSize,
done: make(chan struct{}),
}
pieceUUID := uuid.New()
if deal.DealProposal != nil {
log.Infow("piece assign request", "piece_cid", deal.PieceCID().String(), "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID)
}
pieceInfoLk.Lock()
pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi)
pieceInfoLk.Unlock()
// /piece?piece_id=<uuid>
dataUrl := rootUrl
dataUrl.Path = "/piece"
dataUrl.RawQuery = "piece_id=" + pieceUUID.String()
// add piece entry
refID, pieceWasCreated, err := addPieceEntry(ctx, db, conf, deal, pieceSize, dataUrl, ssize)
if err != nil {
return api.SectorOffset{}, err
}
// wait for piece to be parked
if pieceWasCreated {
<-pi.done
} else {
// If the piece was not created, we need to close the done channel
close(pi.done)
closeDataReader(pieceData)
}
{
// piece park is either done or currently happening from another AP call
// now we need to make sure that the piece is definitely parked successfully
// - in case of errors we return, and boost should be able to retry the call
// * If piece is completed, return
// * If piece is not completed but has null taskID, wait
// * If piece has a non-null taskID
// * If the task is in harmony_tasks, wait
// * Otherwise look for an error in harmony_task_history and return that
for {
var taskID *int64
var complete bool
err := db.QueryRow(ctx, `SELECT pp.task_id, pp.complete
FROM parked_pieces pp
JOIN parked_piece_refs ppr ON pp.id = ppr.piece_id
WHERE ppr.ref_id = $1;`, refID).Scan(&taskID, &complete)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("getting piece park status: %w", err)
}
if complete {
break
}
if taskID == nil {
// piece is not parked yet
time.Sleep(5 * time.Second)
continue
}
// check if task is in harmony_tasks
var taskName string
err = db.QueryRow(ctx, `SELECT name FROM harmony_task WHERE id = $1`, *taskID).Scan(&taskName)
if err == nil {
// task is in harmony_tasks, wait
time.Sleep(5 * time.Second)
continue
}
if err != pgx.ErrNoRows {
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task in harmony_tasks: %w", err)
}
// task is not in harmony_tasks, check harmony_task_history (latest work_end)
var taskError string
var taskResult bool
err = db.QueryRow(ctx, `SELECT result, err FROM harmony_task_history WHERE task_id = $1 ORDER BY work_end DESC LIMIT 1`, *taskID).Scan(&taskResult, &taskError)
if err != nil {
return api.SectorOffset{}, xerrors.Errorf("checking park-piece task history: %w", err)
}
if !taskResult {
return api.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError)
}
return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete")
}
}
pieceIDUrl := url.URL{
Scheme: "pieceref",
Opaque: fmt.Sprintf("%d", refID),
}
// make a sector
so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil)
if err != nil {
return api.SectorOffset{}, err
}
log.Infow("piece assigned to sector", "piece_cid", deal.PieceCID().String(), "sector", so.Sector, "offset", so.Offset)
return so, nil
}
}
func addPieceEntry(ctx context.Context, db *harmonydb.DB, conf *config.CurioConfig, deal lpiece.PieceDealInfo, pieceSize abi.UnpaddedPieceSize, dataUrl url.URL, ssize abi.SectorSize) (int64, bool, error) {
var refID int64
var pieceWasCreated bool
for {
var backpressureWait bool
comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
// BACKPRESSURE
wait, err := maybeApplyBackpressure(tx, conf.Ingest, ssize)
if err != nil {
return false, xerrors.Errorf("backpressure checks: %w", err)
}
if wait {
backpressureWait = true
return false, nil
}
var pieceID int64
// Attempt to select the piece ID first
err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.PieceCID().String()).Scan(&pieceID)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
// Piece does not exist, attempt to insert
err = tx.QueryRow(`
INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size)
VALUES ($1, $2, $3)
ON CONFLICT (piece_cid) DO NOTHING
RETURNING id`, deal.PieceCID().String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID)
if err != nil {
return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err)
}
pieceWasCreated = true // New piece was created
} else {
// Some other error occurred during select
return false, xerrors.Errorf("checking existing parked piece: %w", err)
}
} else {
pieceWasCreated = false // Piece already exists, no new piece was created
}
// Add parked_piece_ref
err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url)
VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID)
if err != nil {
return false, xerrors.Errorf("inserting parked piece ref: %w", err)
}
// If everything went well, commit the transaction
return true, nil // This will commit the transaction
}, harmonydb.OptionRetry())
if err != nil {
return refID, pieceWasCreated, xerrors.Errorf("inserting parked piece: %w", err)
}
if !comm {
if backpressureWait {
// Backpressure was applied, wait and try again
select {
case <-time.After(backpressureWaitTime):
case <-ctx.Done():
return refID, pieceWasCreated, xerrors.Errorf("context done while waiting for backpressure: %w", ctx.Err())
}
continue
}
return refID, pieceWasCreated, xerrors.Errorf("piece tx didn't commit")
}
break
}
return refID, pieceWasCreated, nil
}
func closeDataReader(pieceData storiface.Data) {
go func() {
// close the data reader (drain to eof if it's not a closer)
if closer, ok := pieceData.(io.Closer); ok {
if err := closer.Close(); err != nil {
log.Warnw("closing pieceData in DataCid", "error", err)
}
} else {
log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData))
_, err := io.Copy(io.Discard, pieceData)
if err != nil {
log.Warnw("draining pieceData in DataCid", "error", err)
}
}
}()
}
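// maybeApplyBackpressure asks the caller to wait before ingesting more deals when too much work is
// already queued: buffered SDR/tree/PoRep tasks in the sealing pipeline, plus sectors still waiting
// for deals, are compared against the configured limits. (Descriptive comment added for clarity.)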
func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig, ssize abi.SectorSize) (wait bool, err error) {
var bufferedSDR, bufferedTrees, bufferedPoRep, waitDealSectors int
err = tx.QueryRow(`
WITH BufferedSDR AS (
SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_sdr_count
FROM sectors_sdr_pipeline p
LEFT JOIN harmony_task t ON p.task_id_sdr = t.id
WHERE p.after_sdr = false
),
BufferedTrees AS (
SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count
FROM sectors_sdr_pipeline p
LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id
WHERE p.after_sdr = true AND p.after_tree_r = false
),
BufferedPoRep AS (
SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count
FROM sectors_sdr_pipeline p
LEFT JOIN harmony_task t ON p.task_id_porep = t.id
WHERE p.after_tree_r = true AND p.after_porep = false
),
WaitDealSectors AS (
SELECT COUNT(DISTINCT sip.sector_number) AS wait_deal_sectors_count
FROM sectors_sdr_initial_pieces sip
LEFT JOIN sectors_sdr_pipeline sp ON sip.sp_id = sp.sp_id AND sip.sector_number = sp.sector_number
WHERE sp.sector_number IS NULL
)
SELECT
(SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered_sdr,
(SELECT buffered_trees_count FROM BufferedTrees) AS buffered_trees_count,
(SELECT buffered_porep_count FROM BufferedPoRep) AS buffered_porep_count,
(SELECT wait_deal_sectors_count FROM WaitDealSectors) AS wait_deal_sectors_count
`).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep, &waitDealSectors)
if err != nil {
return false, xerrors.Errorf("counting buffered sectors: %w", err)
}
var pieceSizes []abi.PaddedPieceSize
err = tx.Select(&pieceSizes, `SELECT piece_padded_size FROM parked_pieces WHERE complete = false;`)
if err != nil {
return false, xerrors.Errorf("getting in-process pieces")
}
sectors := sectorCount(pieceSizes, abi.PaddedPieceSize(ssize))
if cfg.MaxQueueDealSector != 0 && waitDealSectors+sectors > cfg.MaxQueueDealSector {
log.Debugw("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector)
return true, nil
}
if bufferedSDR > cfg.MaxQueueSDR {
log.Debugw("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR)
return true, nil
}
if cfg.MaxQueueTrees != 0 && bufferedTrees > cfg.MaxQueueTrees {
log.Debugw("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees)
return true, nil
}
if cfg.MaxQueuePoRep != 0 && bufferedPoRep > cfg.MaxQueuePoRep {
log.Debugw("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep)
return true, nil
}
return false, nil
}
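// sectorCount estimates how many sectors the pending piece sizes would fill using a simple
// first-fit-decreasing bin packing over targetSize-sized bins. (Descriptive comment added for clarity.)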
func sectorCount(sizes []abi.PaddedPieceSize, targetSize abi.PaddedPieceSize) int {
sort.Slice(sizes, func(i, j int) bool {
return sizes[i] > sizes[j]
})
sectors := make([]abi.PaddedPieceSize, 0)
for _, size := range sizes {
placed := false
for i := range sectors {
if sectors[i]+size <= targetSize {
sectors[i] += size
placed = true
break
}
}
if !placed {
sectors = append(sectors, size)
}
}
return len(sectors)
}

View File

@ -1,396 +0,0 @@
package message
import (
"bytes"
"context"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/multierr"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/promise"
)
var log = logging.Logger("curio/message")
var SendLockedWait = 100 * time.Millisecond
type SenderAPI interface {
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
WalletBalance(ctx context.Context, addr address.Address) (big.Int, error)
MpoolGetNonce(context.Context, address.Address) (uint64, error)
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
}
type SignerAPI interface {
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
}
// Sender abstracts away highly-available message sending with coordination through
// HarmonyDB. It makes sure that nonces are assigned transactionally and that
// messages are correctly broadcast to the network. It ensures that messages
// are sent serially, and that failures to send don't cause nonce gaps.
type Sender struct {
api SenderAPI
sendTask *SendTask
db *harmonydb.DB
}
type SendTask struct {
sendTF promise.Promise[harmonytask.AddTaskFunc]
api SenderAPI
signer SignerAPI
db *harmonydb.DB
}
func (s *SendTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.TODO()
// get message from db
var dbMsg struct {
FromKey string `db:"from_key"`
ToAddr string `db:"to_addr"`
UnsignedData []byte `db:"unsigned_data"`
UnsignedCid string `db:"unsigned_cid"`
// may not be null if we have somehow already signed but failed to send this message
Nonce *uint64 `db:"nonce"`
SignedData []byte `db:"signed_data"`
}
err = s.db.QueryRow(ctx, `
SELECT from_key, nonce, to_addr, unsigned_data, unsigned_cid
FROM message_sends
WHERE send_task_id = $1`, taskID).Scan(
&dbMsg.FromKey, &dbMsg.Nonce, &dbMsg.ToAddr, &dbMsg.UnsignedData, &dbMsg.UnsignedCid)
if err != nil {
return false, xerrors.Errorf("getting message from db: %w", err)
}
// deserialize the message
var msg types.Message
err = msg.UnmarshalCBOR(bytes.NewReader(dbMsg.UnsignedData))
if err != nil {
return false, xerrors.Errorf("unmarshaling unsigned db message: %w", err)
}
// get db send lock
for {
// check if we still own the task
if !stillOwned() {
return false, xerrors.Errorf("lost ownership of task")
}
// try to acquire lock
cn, err := s.db.Exec(ctx, `
INSERT INTO message_send_locks (from_key, task_id, claimed_at)
VALUES ($1, $2, CURRENT_TIMESTAMP) ON CONFLICT (from_key) DO UPDATE
SET task_id = EXCLUDED.task_id, claimed_at = CURRENT_TIMESTAMP
WHERE message_send_locks.task_id = $2;`, dbMsg.FromKey, taskID)
if err != nil {
return false, xerrors.Errorf("acquiring send lock: %w", err)
}
if cn == 1 {
// we got the lock
break
}
// we didn't get the lock, wait a bit and try again
log.Infow("waiting for send lock", "task_id", taskID, "from", dbMsg.FromKey)
time.Sleep(SendLockedWait)
}
// defer release db send lock
defer func() {
_, err2 := s.db.Exec(ctx, `
DELETE from message_send_locks WHERE from_key = $1 AND task_id = $2`, dbMsg.FromKey, taskID)
if err2 != nil {
log.Errorw("releasing send lock", "task_id", taskID, "from", dbMsg.FromKey, "error", err2)
// make sure harmony retries this task so that we eventually release this lock
done = false
err = multierr.Append(err, xerrors.Errorf("releasing send lock: %w", err2))
}
}()
// assign nonce IF NOT ASSIGNED (max(api.MpoolGetNonce, db nonce+1))
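// Illustrative example (not in the original): if the mpool reports nonce 10 but the highest
// successfully-sent nonce recorded in the DB is 12, the message gets nonce 13.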
var sigMsg *types.SignedMessage
if dbMsg.Nonce == nil {
msgNonce, err := s.api.MpoolGetNonce(ctx, msg.From)
if err != nil {
return false, xerrors.Errorf("getting nonce from mpool: %w", err)
}
// get nonce from db
var dbNonce *uint64
r := s.db.QueryRow(ctx, `
SELECT MAX(nonce) FROM message_sends WHERE from_key = $1 AND send_success = true`, msg.From.String())
if err := r.Scan(&dbNonce); err != nil {
return false, xerrors.Errorf("getting nonce from db: %w", err)
}
if dbNonce != nil && *dbNonce+1 > msgNonce {
msgNonce = *dbNonce + 1
}
msg.Nonce = msgNonce
// sign message
sigMsg, err = s.signer.WalletSignMessage(ctx, msg.From, &msg)
if err != nil {
return false, xerrors.Errorf("signing message: %w", err)
}
data, err := sigMsg.Serialize()
if err != nil {
return false, xerrors.Errorf("serializing message: %w", err)
}
jsonBytes, err := sigMsg.MarshalJSON()
if err != nil {
return false, xerrors.Errorf("marshaling message: %w", err)
}
// write to db
n, err := s.db.Exec(ctx, `
UPDATE message_sends SET nonce = $1, signed_data = $2, signed_json = $3, signed_cid = $4
WHERE send_task_id = $5`,
msg.Nonce, data, string(jsonBytes), sigMsg.Cid().String(), taskID)
if err != nil {
return false, xerrors.Errorf("updating db record: %w", err)
}
if n != 1 {
log.Errorw("updating db record: expected 1 row to be affected, got %d", n)
return false, xerrors.Errorf("updating db record: expected 1 row to be affected, got %d", n)
}
} else {
// Note: this handles an unlikely edge case where we previously signed the message but either
// failed to send it or failed to update the db. When that happens, the likely cause is the curio
// process losing its db connection or getting killed before it could update the db. In that case
// the message lock will still be held, so it is safe to rebroadcast the signed message.
// deserialize the signed message
sigMsg = new(types.SignedMessage)
err = sigMsg.UnmarshalCBOR(bytes.NewReader(dbMsg.SignedData))
if err != nil {
return false, xerrors.Errorf("unmarshaling signed db message: %w", err)
}
}
// send!
_, err = s.api.MpoolPush(ctx, sigMsg)
// persist send result
var sendSuccess = err == nil
var sendError string
if err != nil {
sendError = err.Error()
}
_, err = s.db.Exec(ctx, `
UPDATE message_sends SET send_success = $1, send_error = $2, send_time = CURRENT_TIMESTAMP
WHERE send_task_id = $3`, sendSuccess, sendError, taskID)
if err != nil {
return false, xerrors.Errorf("updating db record: %w", err)
}
return true, nil
}
func (s *SendTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
if len(ids) == 0 {
// probably can't happen, but panicking is bad
return nil, nil
}
if s.signer == nil {
// can't sign messages here
return nil, nil
}
return &ids[0], nil
}
func (s *SendTask) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: 1024,
Name: "SendMessage",
Cost: resources.Resources{
Cpu: 0,
Gpu: 0,
Ram: 1 << 20,
},
MaxFailures: 1000,
Follows: nil,
}
}
func (s *SendTask) Adder(taskFunc harmonytask.AddTaskFunc) {
s.sendTF.Set(taskFunc)
}
var _ harmonytask.TaskInterface = &SendTask{}
// NewSender creates a new Sender.
func NewSender(api SenderAPI, signer SignerAPI, db *harmonydb.DB) (*Sender, *SendTask) {
st := &SendTask{
api: api,
signer: signer,
db: db,
}
return &Sender{
api: api,
db: db,
sendTask: st,
}, st
}
// Send atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
//
// When maxFee is set to 0, Send will guess appropriate fee
// based on current chain conditions
//
// Send behaves much like fullnodeApi.MpoolPushMessage, but it coordinates
// through HarmonyDB, making it safe to broadcast messages from multiple independent
// API nodes
//
// Send is also currently more strict about required parameters than MpoolPushMessage
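// A minimal illustrative call (names here are assumptions, not from this file):
//   c, err := sender.Send(ctx, &types.Message{From: worker, To: maddr, Value: big.Zero()}, &api.MessageSendSpec{MaxFee: maxFee}, "wdpost")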
func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageSendSpec, reason string) (cid.Cid, error) {
if mss == nil {
return cid.Undef, xerrors.Errorf("MessageSendSpec cannot be nil")
}
if (mss.MsgUuid != uuid.UUID{}) {
return cid.Undef, xerrors.Errorf("MessageSendSpec.MsgUuid must be zero")
}
fromA, err := s.api.StateAccountKey(ctx, msg.From, types.EmptyTSK)
if err != nil {
return cid.Undef, xerrors.Errorf("getting key address: %w", err)
}
msg.From = fromA
if msg.Nonce != 0 {
return cid.Undef, xerrors.Errorf("Send expects message nonce to be 0, was %d", msg.Nonce)
}
msg, err = s.api.GasEstimateMessageGas(ctx, msg, mss, types.EmptyTSK)
if err != nil {
return cid.Undef, xerrors.Errorf("GasEstimateMessageGas error: %w", err)
}
b, err := s.api.WalletBalance(ctx, msg.From)
if err != nil {
return cid.Undef, xerrors.Errorf("mpool push: getting origin balance: %w", err)
}
requiredFunds := big.Add(msg.Value, msg.RequiredFunds())
if b.LessThan(requiredFunds) {
return cid.Undef, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, requiredFunds)
}
// push the task
taskAdder := s.sendTask.sendTF.Val(ctx)
unsBytes := new(bytes.Buffer)
err = msg.MarshalCBOR(unsBytes)
if err != nil {
return cid.Undef, xerrors.Errorf("marshaling message: %w", err)
}
var sendTaskID *harmonytask.TaskID
taskAdder(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
_, err := tx.Exec(`insert into message_sends (from_key, to_addr, send_reason, unsigned_data, unsigned_cid, send_task_id) values ($1, $2, $3, $4, $5, $6)`,
msg.From.String(), msg.To.String(), reason, unsBytes.Bytes(), msg.Cid().String(), id)
if err != nil {
return false, xerrors.Errorf("inserting message into db: %w", err)
}
sendTaskID = &id
return true, nil
})
if sendTaskID == nil {
return cid.Undef, xerrors.Errorf("failed to add task")
}
// wait for exec
var (
pollInterval = 50 * time.Millisecond
pollIntervalMul = 2
maxPollInterval = 5 * time.Second
pollLoops = 0
sigCid cid.Cid
sendErr error
)
for {
var err error
var sigCidStr, sendError *string
var sendSuccess *bool
err = s.db.QueryRow(ctx, `select signed_cid, send_success, send_error from message_sends where send_task_id = $1`, &sendTaskID).Scan(&sigCidStr, &sendSuccess, &sendError)
if err != nil {
return cid.Undef, xerrors.Errorf("getting cid for task: %w", err)
}
if sendSuccess == nil {
time.Sleep(pollInterval)
pollLoops++
pollInterval *= time.Duration(pollIntervalMul)
if pollInterval > maxPollInterval {
pollInterval = maxPollInterval
}
continue
}
if sigCidStr == nil || sendError == nil {
// should never happen because sendSuccess is already not null here
return cid.Undef, xerrors.Errorf("got null values for sigCidStr or sendError, this should never happen")
}
if !*sendSuccess {
sendErr = xerrors.Errorf("send error: %s", *sendError)
} else {
sigCid, err = cid.Parse(*sigCidStr)
if err != nil {
return cid.Undef, xerrors.Errorf("parsing signed cid: %w", err)
}
}
break
}
log.Infow("sent message", "cid", sigCid, "task_id", sendTaskID, "send_error", sendErr, "poll_loops", pollLoops)
return sigCid, sendErr
}

View File

@ -1,214 +0,0 @@
package message
import (
"context"
"encoding/json"
"sync/atomic"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/chainsched"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
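// MinConfidence is how many epochs a message must be buried on chain before the watcher records it
// as executed; 6 epochs is roughly three minutes at the mainnet 30-second epoch time.
// (Descriptive comment added for clarity.)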
const MinConfidence = 6
type MessageWaiterApi interface {
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
}
type MessageWatcher struct {
db *harmonydb.DB
ht *harmonytask.TaskEngine
api MessageWaiterApi
stopping, stopped chan struct{}
updateCh chan struct{}
bestTs atomic.Pointer[types.TipSetKey]
}
func NewMessageWatcher(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.CurioChainSched, api MessageWaiterApi) (*MessageWatcher, error) {
mw := &MessageWatcher{
db: db,
ht: ht,
api: api,
stopping: make(chan struct{}),
stopped: make(chan struct{}),
updateCh: make(chan struct{}),
}
go mw.run()
if err := pcs.AddHandler(mw.processHeadChange); err != nil {
return nil, err
}
return mw, nil
}
func (mw *MessageWatcher) run() {
defer close(mw.stopped)
for {
select {
case <-mw.stopping:
// todo cleanup assignments
return
case <-mw.updateCh:
mw.update()
}
}
}
func (mw *MessageWatcher) update() {
ctx := context.Background()
tsk := *mw.bestTs.Load()
ts, err := mw.api.ChainGetTipSet(ctx, tsk)
if err != nil {
log.Errorf("failed to get tipset: %+v", err)
return
}
lbts, err := mw.api.ChainGetTipSetByHeight(ctx, ts.Height()-MinConfidence, tsk)
if err != nil {
log.Errorf("failed to get tipset: %+v", err)
return
}
lbtsk := lbts.Key()
machineID := mw.ht.ResourcesAvailable().MachineID
// first if we see pending messages with null owner, assign them to ourselves
{
n, err := mw.db.Exec(ctx, `UPDATE message_waits SET waiter_machine_id = $1 WHERE waiter_machine_id IS NULL AND executed_tsk_cid IS NULL`, machineID)
if err != nil {
log.Errorf("failed to assign pending messages: %+v", err)
return
}
if n > 0 {
log.Debugw("assigned pending messages to ourselves", "assigned", n)
}
}
// get messages assigned to us
var msgs []struct {
Cid string `db:"signed_message_cid"`
From string `db:"from_key"`
Nonce uint64 `db:"nonce"`
FromAddr address.Address `db:"-"`
}
// really large limit in case of things getting stuck and backlogging severely
err = mw.db.Select(ctx, &msgs, `SELECT signed_message_cid, from_key, nonce FROM message_waits
JOIN message_sends ON signed_message_cid = signed_cid
WHERE waiter_machine_id = $1 LIMIT 10000`, machineID)
if err != nil {
log.Errorf("failed to get assigned messages: %+v", err)
return
}
// get address/nonce set to check
toCheck := make(map[address.Address]uint64)
for i := range msgs {
msgs[i].FromAddr, err = address.NewFromString(msgs[i].From)
if err != nil {
log.Errorf("failed to parse from address: %+v", err)
return
}
toCheck[msgs[i].FromAddr] = 0
}
// get the nonce for each address
for addr := range toCheck {
act, err := mw.api.StateGetActor(ctx, addr, lbtsk)
if err != nil {
log.Errorf("failed to get actor: %+v", err)
return
}
toCheck[addr] = act.Nonce
}
// check if any of the messages we have assigned to us are now on chain, and have been for MinConfidence epochs
for _, msg := range msgs {
if msg.Nonce > toCheck[msg.FromAddr] {
continue // definitely not on chain yet
}
look, err := mw.api.StateSearchMsg(ctx, lbtsk, cid.MustParse(msg.Cid), api.LookbackNoLimit, false)
if err != nil {
log.Errorf("failed to search for message: %+v", err)
return
}
if look == nil {
continue // not on chain yet (or not executed yet)
}
tskCid, err := look.TipSet.Cid()
if err != nil {
log.Errorf("failed to get tipset cid: %+v", err)
return
}
emsg, err := mw.api.ChainGetMessage(ctx, look.Message)
if err != nil {
log.Errorf("failed to get message: %+v", err)
return
}
execMsg, err := json.Marshal(emsg)
if err != nil {
log.Errorf("failed to marshal message: %+v", err)
return
}
// record in db
_, err = mw.db.Exec(ctx, `UPDATE message_waits SET
waiter_machine_id = NULL,
executed_tsk_cid = $1, executed_tsk_epoch = $2,
executed_msg_cid = $3, executed_msg_data = $4,
executed_rcpt_exitcode = $5, executed_rcpt_return = $6, executed_rcpt_gas_used = $7
WHERE signed_message_cid = $8`, tskCid, look.Height,
look.Message, execMsg,
look.Receipt.ExitCode, look.Receipt.Return, look.Receipt.GasUsed,
msg.Cid)
if err != nil {
log.Errorf("failed to update message wait: %+v", err)
return
}
}
}
func (mw *MessageWatcher) Stop(ctx context.Context) error {
close(mw.stopping)
select {
case <-mw.stopped:
case <-ctx.Done():
return ctx.Err()
}
return nil
}
func (mw *MessageWatcher) processHeadChange(ctx context.Context, revert *types.TipSet, apply *types.TipSet) error {
best := apply.Key()
mw.bestTs.Store(&best)
select {
case mw.updateCh <- struct{}{}:
default:
}
return nil
}

View File

@ -1,81 +0,0 @@
package multictladdr
import (
"context"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
var log = logging.Logger("curio/multictladdr")
type MultiAddressSelector struct {
MinerMap map[address.Address]api.AddressConfig
}
func (as *MultiAddressSelector) AddressFor(ctx context.Context, a ctladdr.NodeApi, minerID address.Address, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) {
if as == nil {
// should only happen in some tests
log.Warnw("smart address selection disabled, using worker address")
return mi.Worker, big.Zero(), nil
}
tmp := as.MinerMap[minerID]
var addrs []address.Address
switch use {
case api.PreCommitAddr:
addrs = append(addrs, tmp.PreCommitControl...)
case api.CommitAddr:
addrs = append(addrs, tmp.CommitControl...)
case api.TerminateSectorsAddr:
addrs = append(addrs, tmp.TerminateControl...)
case api.DealPublishAddr:
addrs = append(addrs, tmp.DealPublishControl...)
default:
defaultCtl := map[address.Address]struct{}{}
for _, a := range mi.ControlAddresses {
defaultCtl[a] = struct{}{}
}
delete(defaultCtl, mi.Owner)
delete(defaultCtl, mi.Worker)
configCtl := append([]address.Address{}, tmp.PreCommitControl...)
configCtl = append(configCtl, tmp.CommitControl...)
configCtl = append(configCtl, tmp.TerminateControl...)
configCtl = append(configCtl, tmp.DealPublishControl...)
for _, addr := range configCtl {
if addr.Protocol() != address.ID {
var err error
addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK)
if err != nil {
log.Warnw("looking up control address", "address", addr, "error", err)
continue
}
}
delete(defaultCtl, addr)
}
for a := range defaultCtl {
addrs = append(addrs, a)
}
}
if len(addrs) == 0 || !tmp.DisableWorkerFallback {
addrs = append(addrs, mi.Worker)
}
if !tmp.DisableOwnerFallback {
addrs = append(addrs, mi.Owner)
}
return ctladdr.PickAddress(ctx, a, mi, goodFunds, minFunds, addrs)
}

View File

@ -1,135 +0,0 @@
package piece
import (
"context"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/promise"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type CleanupPieceTask struct {
max int
db *harmonydb.DB
sc *ffi.SealCalls
TF promise.Promise[harmonytask.AddTaskFunc]
}
func NewCleanupPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *CleanupPieceTask {
pt := &CleanupPieceTask{
db: db,
sc: sc,
max: max,
}
go pt.pollCleanupTasks(context.Background())
return pt
}
func (c *CleanupPieceTask) pollCleanupTasks(ctx context.Context) {
for {
// select pieces with no refs and null cleanup_task_id
var pieceIDs []struct {
ID storiface.PieceNumber `db:"id"`
}
err := c.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE cleanup_task_id IS NULL AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`)
if err != nil {
log.Errorf("failed to get parked pieces: %s", err)
time.Sleep(PieceParkPollInterval)
continue
}
if len(pieceIDs) == 0 {
time.Sleep(PieceParkPollInterval)
continue
}
for _, pieceID := range pieceIDs {
pieceID := pieceID
// create a task for each piece
c.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) {
// update
n, err := tx.Exec(`UPDATE parked_pieces SET cleanup_task_id = $1 WHERE id = $2 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`, id, pieceID.ID)
if err != nil {
return false, xerrors.Errorf("updating parked piece: %w", err)
}
// commit only if we updated the piece
return n > 0, nil
})
}
}
}
func (c *CleanupPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
// select by cleanup_task_id
var pieceID int64
err = c.db.QueryRow(ctx, "SELECT id FROM parked_pieces WHERE cleanup_task_id = $1", taskID).Scan(&pieceID)
if err != nil {
return false, xerrors.Errorf("query parked_piece: %w", err)
}
// delete from parked_pieces where id = $1 and the ref count is 0
// note: we delete from the db first because that guarantees that the piece is no longer in use
// if storage delete fails, it will be retried later in other cleanup tasks
n, err := c.db.Exec(ctx, "DELETE FROM parked_pieces WHERE id = $1 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = $1) = 0", pieceID)
if err != nil {
return false, xerrors.Errorf("delete parked_piece: %w", err)
}
if n == 0 {
_, err = c.db.Exec(ctx, `UPDATE parked_pieces SET cleanup_task_id = NULL WHERE id = $1`, pieceID)
if err != nil {
return false, xerrors.Errorf("marking piece as complete: %w", err)
}
return true, nil
}
// remove from storage
err = c.sc.RemovePiece(ctx, storiface.PieceNumber(pieceID))
if err != nil {
log.Errorw("remove piece", "piece_id", pieceID, "error", err)
}
return true, nil
}
func (c *CleanupPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
// the remove call runs on paths.Remote storage, so it doesn't really matter where it runs
id := ids[0]
return &id, nil
}
func (c *CleanupPieceTask) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: c.max,
Name: "DropPiece",
Cost: resources.Resources{
Cpu: 1,
Gpu: 0,
Ram: 64 << 20,
Storage: nil,
},
MaxFailures: 10,
}
}
func (c *CleanupPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) {
c.TF.Set(taskFunc)
}
var _ harmonytask.TaskInterface = &CleanupPieceTask{}

View File

@ -1,239 +0,0 @@
package piece
import (
"context"
"encoding/json"
"strconv"
"time"
"github.com/hashicorp/go-multierror"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/curiosrc/seal"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/promise"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var log = logging.Logger("cu-piece")
var PieceParkPollInterval = time.Second * 15
// ParkPieceTask gets a piece from some origin, and parks it in storage
// Pieces are always f00, piece ID is mapped to pieceCID in the DB
type ParkPieceTask struct {
db *harmonydb.DB
sc *ffi.SealCalls
TF promise.Promise[harmonytask.AddTaskFunc]
max int
}
func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) (*ParkPieceTask, error) {
pt := &ParkPieceTask{
db: db,
sc: sc,
max: max,
}
ctx := context.Background()
// We should delete all incomplete pieces before we start
// as we would have lost the reader for these. The RPC caller will get an error
// when Curio shuts down before parking a piece. They can always retry.
// Leaving these pieces around would waste resources in the form of unnecessary ParkPieceTask runs.
_, err := db.Exec(ctx, `DELETE FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`)
if err != nil {
return nil, xerrors.Errorf("failed to delete incomplete parked pieces: %w", err)
}
go pt.pollPieceTasks(ctx)
return pt, nil
}
func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) {
for {
// select parked pieces with no task_id
var pieceIDs []struct {
ID storiface.PieceNumber `db:"id"`
}
err := p.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`)
if err != nil {
log.Errorf("failed to get parked pieces: %s", err)
time.Sleep(PieceParkPollInterval)
continue
}
if len(pieceIDs) == 0 {
time.Sleep(PieceParkPollInterval)
continue
}
for _, pieceID := range pieceIDs {
pieceID := pieceID
// create a task for each piece
p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) {
// update
n, err := tx.Exec(`UPDATE parked_pieces SET task_id = $1 WHERE id = $2 AND complete = FALSE AND task_id IS NULL`, id, pieceID.ID)
if err != nil {
return false, xerrors.Errorf("updating parked piece: %w", err)
}
// commit only if we updated the piece
return n > 0, nil
})
}
}
}
func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
// Define a struct to hold piece data.
var piecesData []struct {
PieceID int64 `db:"id"`
PieceCreatedAt time.Time `db:"created_at"`
PieceCID string `db:"piece_cid"`
Complete bool `db:"complete"`
PiecePaddedSize int64 `db:"piece_padded_size"`
PieceRawSize string `db:"piece_raw_size"`
}
// Select the piece data using the task ID.
err = p.db.Select(ctx, &piecesData, `
SELECT id, created_at, piece_cid, complete, piece_padded_size, piece_raw_size
FROM parked_pieces
WHERE task_id = $1
`, taskID)
if err != nil {
return false, xerrors.Errorf("fetching piece data: %w", err)
}
if len(piecesData) == 0 {
return false, xerrors.Errorf("no piece data found for task_id: %d", taskID)
}
pieceData := piecesData[0]
if pieceData.Complete {
log.Warnw("park piece task already complete", "task_id", taskID, "piece_cid", pieceData.PieceCID)
return true, nil
}
// Define a struct for reference data.
var refData []struct {
DataURL string `db:"data_url"`
DataHeaders json.RawMessage `db:"data_headers"`
}
// Now, select the first reference data that has a URL.
err = p.db.Select(ctx, &refData, `
SELECT data_url, data_headers
FROM parked_piece_refs
WHERE piece_id = $1 AND data_url IS NOT NULL`, pieceData.PieceID)
if err != nil {
return false, xerrors.Errorf("fetching reference data: %w", err)
}
if len(refData) == 0 {
return false, xerrors.Errorf("no refs found for piece_id: %d", pieceData.PieceID)
}
// Convert piece_raw_size from string to int64.
pieceRawSize, err := strconv.ParseInt(pieceData.PieceRawSize, 10, 64)
if err != nil {
return false, xerrors.Errorf("parsing piece raw size: %w", err)
}
var merr error
for i := range refData {
if refData[i].DataURL != "" {
upr := &seal.UrlPieceReader{
Url: refData[i].DataURL,
RawSize: pieceRawSize,
}
defer func() {
_ = upr.Close()
}()
pnum := storiface.PieceNumber(pieceData.PieceID)
if err := p.sc.WritePiece(ctx, &taskID, pnum, pieceRawSize, upr); err != nil {
merr = multierror.Append(merr, xerrors.Errorf("write piece: %w", err))
continue
}
// Update the piece as complete after a successful write.
_, err = p.db.Exec(ctx, `UPDATE parked_pieces SET complete = TRUE, task_id = NULL WHERE id = $1`, pieceData.PieceID)
if err != nil {
return false, xerrors.Errorf("marking piece as complete: %w", err)
}
return true, nil
}
return false, merr
}
// If no URL is found, this indicates an issue since at least one URL is expected.
return false, xerrors.Errorf("no data URL found for piece_id: %d", pieceData.PieceID)
}
func (p *ParkPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
id := ids[0]
return &id, nil
}
func (p *ParkPieceTask) TypeDetails() harmonytask.TaskTypeDetails {
const maxSizePiece = 64 << 30
return harmonytask.TaskTypeDetails{
Max: p.max,
Name: "ParkPiece",
Cost: resources.Resources{
Cpu: 1,
Gpu: 0,
Ram: 64 << 20,
Storage: p.sc.Storage(p.taskToRef, storiface.FTPiece, storiface.FTNone, maxSizePiece, storiface.PathSealing, paths.MinFreeStoragePercentage),
},
MaxFailures: 10,
}
}
func (p *ParkPieceTask) taskToRef(id harmonytask.TaskID) (ffi.SectorRef, error) {
var pieceIDs []struct {
ID storiface.PieceNumber `db:"id"`
}
err := p.db.Select(context.Background(), &pieceIDs, `SELECT id FROM parked_pieces WHERE task_id = $1`, id)
if err != nil {
return ffi.SectorRef{}, xerrors.Errorf("getting piece id: %w", err)
}
if len(pieceIDs) != 1 {
return ffi.SectorRef{}, xerrors.Errorf("expected 1 piece id, got %d", len(pieceIDs))
}
pref := pieceIDs[0].ID.Ref()
return ffi.SectorRef{
SpID: int64(pref.ID.Miner),
SectorNumber: int64(pref.ID.Number),
RegSealProof: pref.ProofType,
}, nil
}
func (p *ParkPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) {
p.TF.Set(taskFunc)
}
var _ harmonytask.TaskInterface = &ParkPieceTask{}

View File

@ -1,292 +0,0 @@
package proof
import (
"io"
"math/bits"
"os"
"runtime"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
pool "github.com/libp2p/go-buffer-pool"
"github.com/minio/sha256-simd"
"golang.org/x/xerrors"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/storage/sealer/fr32"
)
const nodeSize = 32
const threadChunkSize = 1 << 20
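// hashChunk computes merkle tree layers over the chunk in data[0], writing each parent
// layer into data[l+1]. For pair index i it ascends bits.TrailingZeros(^i)+1 levels, so a
// single left-to-right pass fills every level buffer that was provided. The top two bits
// of each 32-byte node are cleared ("set top bits to 00") to match the 254-bit tree-D
// node encoding used by Filecoin commitments.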
func hashChunk(data [][]byte) {
l1Nodes := len(data[0]) / nodeSize / 2
d := sha256.New()
sumBuf := make([]byte, nodeSize)
for i := 0; i < l1Nodes; i++ {
levels := bits.TrailingZeros(^uint(i)) + 1
inNode := i * 2 // at level 0
outNode := i
for l := 0; l < levels; l++ {
d.Reset()
inNodeData := data[l][inNode*nodeSize : (inNode+2)*nodeSize]
d.Write(inNodeData)
copy(data[l+1][outNode*nodeSize:(outNode+1)*nodeSize], d.Sum(sumBuf[:0]))
// set top bits to 00
data[l+1][outNode*nodeSize+nodeSize-1] &= 0x3f
inNode--
inNode >>= 1
outNode >>= 1
}
}
}
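// BuildTreeD streams (optionally unpadded) piece data from the reader, writes every layer
// of the binary merkle tree (TreeD) into outPath, and returns the piece commitment CID
// (CommD) computed from the tree root. size is the padded piece size and is expected to
// be a power of two.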
func BuildTreeD(data io.Reader, unpaddedData bool, outPath string, size abi.PaddedPieceSize) (_ cid.Cid, err error) {
out, err := os.Create(outPath)
if err != nil {
return cid.Undef, err
}
defer func() {
cerr := out.Close()
if err != nil {
// remove the file, it's probably bad
rerr := os.Remove(outPath)
if rerr != nil {
err = multierror.Append(err, rerr)
}
}
if cerr != nil {
err = multierror.Append(err, cerr)
}
}()
outSize := treeSize(size)
// allocate space for the tree
err = out.Truncate(int64(outSize))
if err != nil {
return cid.Undef, err
}
// setup buffers
maxThreads := int64(size) / threadChunkSize
if maxThreads > int64(runtime.NumCPU())*15/10 {
maxThreads = int64(runtime.NumCPU()) * 15 / 10
}
if maxThreads < 1 {
maxThreads = 1
}
// allocate buffers
var bufLk sync.Mutex
workerBuffers := make([][][]byte, maxThreads) // [worker][level][levelSize]
for i := range workerBuffers {
workerBuffer := make([][]byte, 1)
bottomBufSize := int64(threadChunkSize)
if bottomBufSize > int64(size) {
bottomBufSize = int64(size)
}
workerBuffer[0] = pool.Get(int(bottomBufSize))
// append levels until we get to a 32 byte level
for len(workerBuffer[len(workerBuffer)-1]) > 32 {
newLevel := pool.Get(len(workerBuffer[len(workerBuffer)-1]) / 2)
workerBuffer = append(workerBuffer, newLevel)
}
workerBuffers[i] = workerBuffer
}
// prepare apex buffer
var apexBuf [][]byte
{
apexBottomSize := uint64(size) / uint64(len(workerBuffers[0][0]))
if apexBottomSize == 0 {
apexBottomSize = 1
}
apexBuf = make([][]byte, 1)
apexBuf[0] = pool.Get(int(apexBottomSize * nodeSize))
for len(apexBuf[len(apexBuf)-1]) > 32 {
newLevel := pool.Get(len(apexBuf[len(apexBuf)-1]) / 2)
apexBuf = append(apexBuf, newLevel)
}
}
// defer free pool buffers
defer func() {
for _, workerBuffer := range workerBuffers {
for _, level := range workerBuffer {
pool.Put(level)
}
}
for _, level := range apexBuf {
pool.Put(level)
}
}()
// start processing
var processed uint64
var workWg sync.WaitGroup
var errLock sync.Mutex
var oerr error
for processed < uint64(size) {
// get a buffer
bufLk.Lock()
if len(workerBuffers) == 0 {
bufLk.Unlock()
time.Sleep(50 * time.Microsecond)
continue
}
// pop last
workBuffer := workerBuffers[len(workerBuffers)-1]
workerBuffers = workerBuffers[:len(workerBuffers)-1]
bufLk.Unlock()
// before reading check that we didn't get a write error
errLock.Lock()
if oerr != nil {
errLock.Unlock()
return cid.Undef, oerr
}
errLock.Unlock()
// read data into the bottom level
// note: the bottom level will never be too big; data is power of two
// size, and if it's smaller than a single buffer, we only have one
// smaller buffer
processedSize := uint64(len(workBuffer[0]))
if unpaddedData {
workBuffer[0] = workBuffer[0][:abi.PaddedPieceSize(len(workBuffer[0])).Unpadded()]
}
_, err := io.ReadFull(data, workBuffer[0])
if err != nil && err != io.EOF {
return cid.Undef, err
}
// start processing
workWg.Add(1)
go func(startOffset uint64) {
defer workWg.Done()
if unpaddedData {
paddedBuf := pool.Get(int(abi.UnpaddedPieceSize(len(workBuffer[0])).Padded()))
fr32.PadSingle(workBuffer[0], paddedBuf)
pool.Put(workBuffer[0])
workBuffer[0] = paddedBuf
}
hashChunk(workBuffer)
// persist apex
{
apexHash := workBuffer[len(workBuffer)-1]
hashPos := startOffset / uint64(len(workBuffer[0])) * nodeSize
copy(apexBuf[0][hashPos:hashPos+nodeSize], apexHash)
}
// write results
offsetInLayer := startOffset
for layer, layerData := range workBuffer {
// layerOff is outSize:bits[most significant bit - layer]
layerOff := layerOffset(uint64(size), layer)
dataOff := offsetInLayer + layerOff
offsetInLayer /= 2
_, werr := out.WriteAt(layerData, int64(dataOff))
if werr != nil {
errLock.Lock()
oerr = multierror.Append(oerr, werr)
errLock.Unlock()
return
}
}
// return buffer
bufLk.Lock()
workerBuffers = append(workerBuffers, workBuffer)
bufLk.Unlock()
}(processed)
processed += processedSize
}
workWg.Wait()
if oerr != nil {
return cid.Undef, oerr
}
threadLayers := bits.Len(uint(len(workerBuffers[0][0])) / nodeSize)
if len(apexBuf) > 0 {
// hash the apex
hashChunk(apexBuf)
// write apex
for apexLayer, layerData := range apexBuf {
if apexLayer == 0 {
continue
}
layer := apexLayer + threadLayers - 1
layerOff := layerOffset(uint64(size), layer)
_, werr := out.WriteAt(layerData, int64(layerOff))
if werr != nil {
return cid.Undef, xerrors.Errorf("write apex: %w", werr)
}
}
}
var commp [32]byte
copy(commp[:], apexBuf[len(apexBuf)-1])
commCid, err := commcid.DataCommitmentV1ToCID(commp[:])
if err != nil {
return cid.Undef, err
}
return commCid, nil
}
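// treeSize returns the number of bytes needed to store every layer of the tree for a
// padded piece of the given size: size + size/2 + ... + 32, which for a power-of-two size
// equals 2*size - 32 (e.g. treeSize(256) = 256+128+64+32 = 480, matching TestTreeSize).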
func treeSize(data abi.PaddedPieceSize) uint64 {
bytesToAlloc := uint64(data)
// append bytes until we get to nodeSize
for todo := bytesToAlloc; todo > nodeSize; todo /= 2 {
bytesToAlloc += todo / 2
}
return bytesToAlloc
}
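// layerOffset returns the byte offset at which the given layer starts in the output file:
// layer 0 (the leaves) starts at 0, layer 1 at size, layer 2 at size+size/2, and so on,
// e.g. layerOffset(128, 2) = 128+64 = 192 (see TestTreeLayerOffset). size must be a power
// of two.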
func layerOffset(size uint64, layer int) uint64 {
allOnes := uint64(0xffff_ffff_ffff_ffff)
// get 'layer' bits set to 1
layerOnes := allOnes >> uint64(64-layer)
// shift layerOnes to the left such that the highest bit is at the same position as the highest bit in size (which is power-of-two)
sizeBitPos := bits.Len64(size) - 1
layerOnes <<= sizeBitPos - (layer - 1)
return layerOnes
}

View File

@ -1,516 +0,0 @@
package proof
import (
"bufio"
"bytes"
"crypto/rand"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"testing"
pool "github.com/libp2p/go-buffer-pool"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
)
func TestTreeSize(t *testing.T) {
require.Equal(t, uint64(32), treeSize(abi.PaddedPieceSize(32)))
require.Equal(t, uint64(64+32), treeSize(abi.PaddedPieceSize(64)))
require.Equal(t, uint64(128+64+32), treeSize(abi.PaddedPieceSize(128)))
require.Equal(t, uint64(256+128+64+32), treeSize(abi.PaddedPieceSize(256)))
}
func TestTreeLayerOffset(t *testing.T) {
require.Equal(t, uint64(0), layerOffset(128, 0))
require.Equal(t, uint64(128), layerOffset(128, 1))
require.Equal(t, uint64(128+64), layerOffset(128, 2))
require.Equal(t, uint64(128+64+32), layerOffset(128, 3))
}
func TestHashChunk(t *testing.T) {
chunk := make([]byte, 64)
chunk[0] = 0x01
out := make([]byte, 32)
data := [][]byte{chunk, out}
hashChunk(data)
// 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d
// d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
expect := []byte{
0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70,
0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d,
0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77,
0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f,
}
require.Equal(t, expect, out)
}
func TestHashChunk2L(t *testing.T) {
data0 := make([]byte, 128)
data0[0] = 0x01
l1 := make([]byte, 64)
l2 := make([]byte, 32)
data := [][]byte{data0, l1, l2}
hashChunk(data)
// 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d
// d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
expectL1Left := []byte{
0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70,
0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d,
0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77,
0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f,
}
// f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b
// 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
expectL1Rest := []byte{
0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x0b,
}
require.Equal(t, expectL1Left, l1[:32])
require.Equal(t, expectL1Rest, l1[32:])
// 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8
// 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f
expectL2 := []byte{
0x0d, 0xd6, 0xda, 0xe4, 0x1c, 0x2f, 0x75, 0x55,
0x01, 0x29, 0x59, 0x4f, 0xb6, 0x44, 0xe4, 0xa8,
0x42, 0xcf, 0xaf, 0xb3, 0x16, 0xa2, 0xd5, 0x93,
0x21, 0xe3, 0x88, 0xfe, 0x84, 0xa1, 0xec, 0x2f,
}
require.Equal(t, expectL2, l2)
}
func Test2K(t *testing.T) {
data := make([]byte, 2048)
data[0] = 0x01
tempFile := filepath.Join(t.TempDir(), "tree.dat")
commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 2048)
require.NoError(t, err)
fmt.Println(commd)
// dump tree.dat
dat, err := os.ReadFile(tempFile)
require.NoError(t, err)
for i, b := range dat {
// 32 values per line
if i%32 == 0 {
fmt.Println()
// line offset hexdump style
fmt.Printf("%04x: ", i)
}
fmt.Printf("%02x ", b)
}
fmt.Println()
require.Equal(t, "baga6ea4seaqovgk4kr4eoifujh6jfmdqvw3m6zrvyjqzu6s6abkketui6jjoydi", commd.String())
}
const expectD8M = `00000000: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
00800000: 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f
00800020: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
*
00c00000: 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f
00c00020: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33
*
00e00000: 11 b1 c4 80 05 21 d5 e5 83 4a de b3 70 7c 74 15 9f f3 37 b0 96 16 3c 94 31 16 73 40 e7 b1 17 1d
00e00020: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f
*
00f00000: ec 69 25 55 9b cc 52 84 0a 22 38 5b 2b 6b 35 b4 50 14 50 04 28 f4 59 fe c1 23 01 0f e7 ef 18 1c
00f00020: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26
*
00f80000: 3d d2 eb 19 3e e2 f0 47 34 87 bf 4b 83 aa 3a bd a9 c8 4e fa e5 52 6d 8a fd 61 2d 5d 9e 3d 79 34
00f80020: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f
*
00fc0000: ea 99 5c 54 78 47 20 b4 49 fc 92 b0 70 ad b6 cf 66 35 c2 61 9a 7a 5e 00 54 a2 4e 88 f2 52 ec 0d
00fc0020: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33
*
00fe0000: b9 97 02 8b 06 d7 2e 96 07 86 79 58 e1 5f 8d 07 b7 ae 37 ab 29 ab 3f a9 de fe c9 8e aa 37 6e 28
00fe0020: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24
*
00ff0000: a0 c4 4f 7b a4 4c d2 3c 2e bf 75 98 7b e8 98 a5 63 80 73 b2 f9 11 cf ee ce 14 5a 77 58 0c 6c 12
00ff0020: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f
*
00ff8000: 89 2d 2b 00 a5 c1 54 10 94 ca 65 de 21 3b bd 45 90 14 15 ed d1 10 17 cd 29 f3 ed 75 73 02 a0 3f
00ff8020: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08
*
00ffc000: 22 48 54 8b ba a5 8f e2 db 0b 07 18 c1 d7 20 1f ed 64 c7 8d 7d 22 88 36 b2 a1 b2 f9 42 0b ef 3c
00ffc020: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11
*
00ffe000: 1c 6a 48 08 3e 17 49 90 ef c0 56 ec b1 44 75 1d e2 76 d8 a5 1c 3d 93 d7 4c 81 92 48 ab 78 cc 30
00ffe020: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02
*
00fff000: 0a b4 26 38 1b 72 cd 3b b3 e3 c7 82 18 fe 1f 18 3b 3a 19 db c4 d9 26 94 30 03 cd 01 b6 d1 8d 0b
00fff020: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f
*
00fff800: 16 0d 87 17 1b e7 ae e4 20 a3 54 24 cf df 4f fe a2 fd 7b 94 58 89 58 f3 45 11 57 fc 39 8f 34 26
00fff820: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07
*
00fffc00: 1f 40 60 11 da 08 f8 09 80 63 97 dc 1c 57 b9 87 83 37 5a 59 5d d6 81 42 6c 1e cd d4 3c ab e3 3c
00fffc20: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19
*
00fffe00: 51 4e dd 2f 6f 8f 6d fd 54 b0 d1 20 7b b7 06 df 85 c5 a3 19 0e af 38 72 37 20 c5 07 56 67 7f 14
00fffe20: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b
*
00ffff00: 5a 1d 84 74 85 a3 4b 28 08 93 a9 cf b2 8b 54 44 67 12 8b eb c0 22 bd de c1 04 be ca b4 f4 81 31
00ffff20: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26
*
00ffff80: c5 fb f3 f9 4c c2 2b 3c 51 ad c1 ea af e9 4b a0 9f b2 73 f3 73 d2 10 1f 12 0b 11 c6 85 21 66 2f
00ffffa0: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11
00ffffc0: 23 40 4a 88 80 f9 cb c7 20 39 cb 86 14 35 9c 28 34 84 55 70 fe 95 19 0b bd 4d 93 41 42 e8 25 2c
`
func Test8MiB(t *testing.T) {
data := make([]byte, 8<<20)
data[0] = 0x01
tempFile := filepath.Join(t.TempDir(), "tree.dat")
commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 8<<20)
require.NoError(t, err)
fmt.Println(commd)
// dump tree.dat
dat, err := os.ReadFile(tempFile)
require.NoError(t, err)
actualD := hexPrint32LDedup(bytes.NewReader(dat))
fmt.Println(actualD)
require.EqualValues(t, expectD8M, actualD)
require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String())
}
func Test8MiBUnpad(t *testing.T) {
data := make([]byte, abi.PaddedPieceSize(8<<20).Unpadded())
data[0] = 0x01
tempFile := filepath.Join(t.TempDir(), "tree.dat")
commd, err := BuildTreeD(bytes.NewReader(data), true, tempFile, 8<<20)
require.NoError(t, err)
fmt.Println(commd)
// dump tree.dat
dat, err := os.ReadFile(tempFile)
require.NoError(t, err)
actualD := hexPrint32LDedup(bytes.NewReader(dat))
fmt.Println(actualD)
require.EqualValues(t, expectD8M, actualD)
require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String())
}
/*func Test32Golden(t *testing.T) {
datFile, err := os.Open("../../seal/cac/sc-02-data-tree-d.dat")
require.NoError(t, err)
bufReader := bufio.NewReaderSize(datFile, 1<<20)
actualD := hexPrint32LDedup(bufReader)
fmt.Println(actualD)
}
*/
var expect32Null = `00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
800000000: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b
*
c00000000: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33
*
e00000000: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f
*
f00000000: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26
*
f80000000: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f
*
fc0000000: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33
*
fe0000000: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24
*
ff0000000: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f
*
ff8000000: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08
*
ffc000000: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11
*
ffe000000: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02
*
fff000000: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f
*
fff800000: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07
*
fffc00000: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19
*
fffe00000: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b
*
ffff00000: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26
*
ffff80000: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11
*
ffffc0000: 65 f2 9e 5d 98 d2 46 c3 8b 38 8c fc 06 db 1f 6b 02 13 03 c5 a2 89 00 0b dc e8 32 a9 c3 ec 42 1c
*
ffffe0000: a2 24 75 08 28 58 50 96 5b 7e 33 4b 31 27 b0 c0 42 b1 d0 46 dc 54 40 21 37 62 7c d8 79 9c e1 3a
*
fffff0000: da fd ab 6d a9 36 44 53 c2 6d 33 72 6b 9f ef e3 43 be 8f 81 64 9e c0 09 aa d3 fa ff 50 61 75 08
*
fffff8000: d9 41 d5 e0 d6 31 4a 99 5c 33 ff bd 4f be 69 11 8d 73 d4 e5 fd 2c d3 1f 0f 7c 86 eb dd 14 e7 06
*
fffffc000: 51 4c 43 5c 3d 04 d3 49 a5 36 5f bd 59 ff c7 13 62 91 11 78 59 91 c1 a3 c5 3a f2 20 79 74 1a 2f
*
fffffe000: ad 06 85 39 69 d3 7d 34 ff 08 e0 9f 56 93 0a 4a d1 9a 89 de f6 0c bf ee 7e 1d 33 81 c1 e7 1c 37
*
ffffff000: 39 56 0e 7b 13 a9 3b 07 a2 43 fd 27 20 ff a7 cb 3e 1d 2e 50 5a b3 62 9e 79 f4 63 13 51 2c da 06
*
ffffff800: cc c3 c0 12 f5 b0 5e 81 1a 2b bf dd 0f 68 33 b8 42 75 b4 7b f2 29 c0 05 2a 82 48 4f 3c 1a 5b 3d
*
ffffffc00: 7d f2 9b 69 77 31 99 e8 f2 b4 0b 77 91 9d 04 85 09 ee d7 68 e2 c7 29 7b 1f 14 37 03 4f c3 c6 2c
*
ffffffe00: 66 ce 05 a3 66 75 52 cf 45 c0 2b cc 4e 83 92 91 9b de ac 35 de 2f f5 62 71 84 8e 9f 7b 67 51 07
*
fffffff00: d8 61 02 18 42 5a b5 e9 5b 1c a6 23 9d 29 a2 e4 20 d7 06 a9 6f 37 3e 2f 9c 9a 91 d7 59 d1 9b 01
*
fffffff80: 6d 36 4b 1e f8 46 44 1a 5a 4a 68 86 23 14 ac c0 a4 6f 01 67 17 e5 34 43 e8 39 ee df 83 c2 85 3c
*
fffffffc0: 07 7e 5f de 35 c5 0a 93 03 a5 50 09 e3 49 8a 4e be df f3 9c 42 b7 10 b7 30 d8 ec 7a c7 af a6 3e
`
func Test32G(t *testing.T) {
if os.Getenv("LOTUS_TEST_LARGE_SECTORS") != "1" {
t.Skip("skipping large sector test without env LOTUS_TEST_LARGE_SECTORS=1")
}
data := nullreader.NewNullReader(abi.PaddedPieceSize(32 << 30).Unpadded())
tempFile := filepath.Join(t.TempDir(), "tree.dat")
commd, err := BuildTreeD(data, true, tempFile, 32<<30)
require.NoError(t, err)
fmt.Println(commd)
// dump tree.dat
datFile, err := os.Open(tempFile)
require.NoError(t, err)
defer func() {
require.NoError(t, datFile.Close())
}()
actualD := hexPrint32LDedup(bufio.NewReaderSize(datFile, 1<<20))
fmt.Println(actualD)
require.EqualValues(t, expect32Null, actualD)
require.Equal(t, "baga6ea4seaqao7s73y24kcutaosvacpdjgfe5pw76ooefnyqw4ynr3d2y6x2mpq", commd.String())
}
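// hexPrint32LDedup renders the reader as 32-byte-per-line hex, collapsing runs of
// identical lines into a single "*" marker, similar to hexdump's duplicate-line folding.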
func hexPrint32LDedup(r io.Reader) string {
var prevLine []byte
var outStr string
var duplicateLine bool
buffer := make([]byte, 32)
offset := 0
for {
n, err := r.Read(buffer)
if err == io.EOF {
break
}
if err != nil {
// Handle the error according to your application's requirements
fmt.Println("Error reading:", err)
break
}
if string(prevLine) == string(buffer) {
// Mark as duplicate and skip processing
duplicateLine = true
} else {
if duplicateLine {
// Output a marker for the previous duplicate line
outStr += "*\n"
duplicateLine = false
}
// Convert to hex and output
outStr += fmt.Sprintf("%08x: %s\n", offset, toHex(buffer))
// Update prevLine
if len(prevLine) != 32 {
prevLine = make([]byte, 32)
}
copy(prevLine, buffer)
}
offset += n
}
// If the last line was a duplicate, ensure we mark it
if duplicateLine {
outStr += "*\n"
}
return outStr
}
func toHex(data []byte) string {
var hexStr string
for _, b := range data {
hexStr += fmt.Sprintf("%02x ", b)
}
return hexStr
}
func BenchmarkHashChunk(b *testing.B) {
const benchSize = 1024 * 1024
// Generate 1 MiB of random data
randomData := make([]byte, benchSize)
if _, err := rand.Read(randomData); err != nil {
b.Fatalf("Failed to generate random data: %v", err)
}
// Prepare data structure for hashChunk
data := make([][]byte, 1)
data[0] = randomData
// append levels until we get to a 32 byte level
for len(data[len(data)-1]) > 32 {
newLevel := make([]byte, len(data[len(data)-1])/2)
data = append(data, newLevel)
}
b.SetBytes(benchSize) // Set the number of bytes for the benchmark
b.ResetTimer() // Start the timer after setup
for i := 0; i < b.N; i++ {
hashChunk(data)
// Use the result in some way to avoid compiler optimization
_ = data[1]
}
}
func BenchmarkBuildTreeD512M(b *testing.B) {
const dataSize = 512 * 1024 * 1024 // 512 MiB
// Generate 512 MiB of random data
data := make([]byte, dataSize)
if _, err := rand.Read(data); err != nil {
b.Fatalf("Failed to generate random data: %v", err)
}
// preallocate NumCPU+1 1MiB/512k/256k/...
// with Pool.Get / Pool.Put, so that they are in the pool
{
nc := runtime.NumCPU()
bufs := [][]byte{}
for i := 0; i < nc+1; i++ {
for sz := 1 << 20; sz > 32; sz >>= 1 {
b := pool.Get(sz)
bufs = append(bufs, b)
}
}
for _, b := range bufs {
pool.Put(b)
}
}
/*if b.N == 1 {
b.N = 10
}*/
b.SetBytes(int64(dataSize)) // Set the number of bytes for the benchmark
for i := 0; i < b.N; i++ {
// Create a temporary file for each iteration
tempFile, err := os.CreateTemp("", "tree.dat")
if err != nil {
b.Fatalf("Failed to create temporary file: %v", err)
}
tempFilePath := tempFile.Name()
err = tempFile.Close()
if err != nil {
b.Fatalf("Failed to close temporary file: %v", err)
}
b.StartTimer() // Start the timer for the BuildTreeD operation
_, err = BuildTreeD(bytes.NewReader(data), false, tempFilePath, dataSize)
if err != nil {
b.Fatalf("BuildTreeD failed: %v", err)
}
b.StopTimer() // Stop the timer after BuildTreeD completes
// Clean up the temporary file
err = os.Remove(tempFilePath)
if err != nil {
b.Fatalf("Failed to remove temporary file: %v", err)
}
}
}
func TestLayerOffset(t *testing.T) {
{
size := uint64(2048)
require.Equal(t, uint64(0), layerOffset(size, 0))
require.Equal(t, size, layerOffset(size, 1))
require.Equal(t, size+(size/2), layerOffset(size, 2))
require.Equal(t, size+(size/2)+(size/4), layerOffset(size, 3))
require.Equal(t, size+(size/2)+(size/4)+(size/8), layerOffset(size, 4))
require.Equal(t, size+(size/2)+(size/4)+(size/8)+(size/16), layerOffset(size, 5))
}
{
size := uint64(32 << 30)
maxLayers := 30
for i := 0; i <= maxLayers; i++ {
var expect uint64
for j := 0; j < i; j++ {
expect += size >> uint64(j)
}
fmt.Printf("layer %d: %d\n", i, expect)
require.Equal(t, expect, layerOffset(size, i))
}
}
{
size := uint64(64 << 30)
maxLayers := 31
for i := 0; i <= maxLayers; i++ {
var expect uint64
for j := 0; j < i; j++ {
expect += size >> uint64(j)
}
fmt.Printf("layer %d: %d\n", i, expect)
require.Equal(t, expect, layerOffset(size, i))
}
}
}

View File

@ -1,28 +0,0 @@
# Curio Sealer
## Overview
The Curio sealer is a collection of harmony tasks and a common poller
which implement the sealing functionality of the Filecoin protocol.
## Pipeline Tasks
* SDR pipeline
  * `SDR` - Generate SDR layers
  * `SDRTrees` - Generate tree files (TreeD, TreeR, TreeC)
  * `PreCommitSubmit` - Submit precommit message to the network
  * `PoRep` - Generate PoRep proof
  * `CommitSubmit` - Submit commit message to the network
# Poller
The poller is a background process that runs on every node which runs any of the
SDR pipeline tasks. It periodically checks the state of sectors in the SDR pipeline
and schedules whatever tasks are needed to move each sector along the pipeline.
# Error Handling
* Pipeline tasks are expected to always finish successfully as harmonytask tasks.
  If a sealing task encounters an error, it should mark the sector pipeline entry
  as failed and exit without erroring. The poller will then figure out a recovery
  strategy for the sector.
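Below is a minimal, hypothetical sketch of this convention (not part of the repository; the helper name and failure-reason wording are assumptions), using the `failed`, `failed_reason` and `task_id_sdr` columns that the poller reads:

```go
package seal

import (
	"context"

	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)

// markSDRFailed is a hypothetical helper: instead of returning an error from the task's
// Do method, it records the failure in sectors_sdr_pipeline and clears the stage's task
// id, leaving recovery up to the poller.
func markSDRFailed(ctx context.Context, db *harmonydb.DB, spID, sectorNumber int64, reason string) error {
	_, err := db.Exec(ctx, `UPDATE sectors_sdr_pipeline
		SET failed = TRUE, failed_reason = $1, task_id_sdr = NULL
		WHERE sp_id = $2 AND sector_number = $3`, reason, spID, sectorNumber)
	return err
}
```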

View File

@ -1,51 +0,0 @@
package seal
import (
"context"
"net/url"
"strconv"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
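// DropSectorPieceRefs removes the parked_piece_refs rows backing a sector's initial
// pieces: it looks up the pieces' data URLs and, for every pieceref: URL, deletes the
// referenced row so the parked piece can later be garbage collected by CleanupPieceTask.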
func DropSectorPieceRefs(ctx context.Context, db *harmonydb.DB, sid abi.SectorID) error {
//_, err := db.Exec(ctx, `SELECT FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number)
var PieceURL []struct {
URL string `db:"data_url"`
}
err := db.Select(ctx, &PieceURL, `SELECT data_url FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number)
if err != nil {
return xerrors.Errorf("getting piece url: %w", err)
}
for _, pu := range PieceURL {
gourl, err := url.Parse(pu.URL)
if err != nil {
log.Errorw("failed to parse piece url", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number)
continue
}
if gourl.Scheme == "pieceref" {
refID, err := strconv.ParseInt(gourl.Opaque, 10, 64)
if err != nil {
log.Errorw("failed to parse piece ref id", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number)
continue
}
n, err := db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID)
if err != nil {
log.Errorw("failed to delete piece ref", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number)
}
log.Debugw("deleted piece ref", "url", pu.URL, "miner", sid.Miner, "sector", sid.Number, "rows", n)
}
}
return err
}

View File

@ -1,304 +0,0 @@
package seal
import (
"context"
"time"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/promise"
)
var log = logging.Logger("cu/seal")
const (
pollerSDR = iota
pollerTreeD
pollerTreeRC
pollerPrecommitMsg
pollerPoRep
pollerCommitMsg
pollerFinalize
pollerMoveStorage
numPollers
)
const sealPollerInterval = 10 * time.Second
const seedEpochConfidence = 3
type SealPollerAPI interface {
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
ChainHead(context.Context) (*types.TipSet, error)
}
type SealPoller struct {
db *harmonydb.DB
api SealPollerAPI
pollers [numPollers]promise.Promise[harmonytask.AddTaskFunc]
}
func NewPoller(db *harmonydb.DB, api SealPollerAPI) *SealPoller {
return &SealPoller{
db: db,
api: api,
}
}
func (s *SealPoller) RunPoller(ctx context.Context) {
ticker := time.NewTicker(sealPollerInterval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if err := s.poll(ctx); err != nil {
log.Errorw("polling failed", "error", err)
}
}
}
}
/*
NOTE: TaskIDs are ONLY set while the tasks are executing or waiting to execute.
This means that there are ~4 states each task can be in:
* Not run, and dependencies not solved (dependencies are 'After' fields of previous stages), task is null, After is false
* Not run, and dependencies solved, task is null, After is false
* Running or queued, task is set, After is false
* Finished, task is null, After is true
*/
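// For the SDR stage, for example: TaskSDR == nil && !AfterSDR means not yet started (or
// still waiting on dependencies), TaskSDR != nil && !AfterSDR means queued or running,
// and TaskSDR == nil && AfterSDR means finished.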
type pollTask struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
TaskSDR *int64 `db:"task_id_sdr"`
AfterSDR bool `db:"after_sdr"`
TaskTreeD *int64 `db:"task_id_tree_d"`
AfterTreeD bool `db:"after_tree_d"`
TaskTreeC *int64 `db:"task_id_tree_c"`
AfterTreeC bool `db:"after_tree_c"`
TaskTreeR *int64 `db:"task_id_tree_r"`
AfterTreeR bool `db:"after_tree_r"`
TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"`
AfterPrecommitMsg bool `db:"after_precommit_msg"`
AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"`
SeedEpoch *int64 `db:"seed_epoch"`
TaskPoRep *int64 `db:"task_id_porep"`
PoRepProof []byte `db:"porep_proof"`
AfterPoRep bool `db:"after_porep"`
TaskFinalize *int64 `db:"task_id_finalize"`
AfterFinalize bool `db:"after_finalize"`
TaskMoveStorage *int64 `db:"task_id_move_storage"`
AfterMoveStorage bool `db:"after_move_storage"`
TaskCommitMsg *int64 `db:"task_id_commit_msg"`
AfterCommitMsg bool `db:"after_commit_msg"`
AfterCommitMsgSuccess bool `db:"after_commit_msg_success"`
Failed bool `db:"failed"`
FailedReason string `db:"failed_reason"`
}
func (s *SealPoller) poll(ctx context.Context) error {
var tasks []pollTask
err := s.db.Select(ctx, &tasks, `SELECT
sp_id, sector_number,
task_id_sdr, after_sdr,
task_id_tree_d, after_tree_d,
task_id_tree_c, after_tree_c,
task_id_tree_r, after_tree_r,
task_id_precommit_msg, after_precommit_msg,
after_precommit_msg_success, seed_epoch,
task_id_porep, porep_proof, after_porep,
task_id_finalize, after_finalize,
task_id_move_storage, after_move_storage,
task_id_commit_msg, after_commit_msg,
after_commit_msg_success,
failed, failed_reason
FROM sectors_sdr_pipeline WHERE after_commit_msg_success != TRUE OR after_move_storage != TRUE`)
if err != nil {
return err
}
for _, task := range tasks {
task := task
if task.Failed {
continue
}
ts, err := s.api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)
}
s.pollStartSDR(ctx, task)
s.pollStartSDRTreeD(ctx, task)
s.pollStartSDRTreeRC(ctx, task)
s.pollStartPrecommitMsg(ctx, task)
s.mustPoll(s.pollPrecommitMsgLanded(ctx, task))
s.pollStartPoRep(ctx, task, ts)
s.pollStartFinalize(ctx, task, ts)
s.pollStartMoveStorage(ctx, task)
s.pollStartCommitMsg(ctx, task)
s.mustPoll(s.pollCommitMsgLanded(ctx, task))
}
return nil
}
func (s *SealPoller) pollStartSDR(ctx context.Context, task pollTask) {
if !task.AfterSDR && task.TaskSDR == nil && s.pollers[pollerSDR].IsSet() {
s.pollers[pollerSDR].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_sdr = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_sdr IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (t pollTask) afterSDR() bool {
return t.AfterSDR
}
func (s *SealPoller) pollStartSDRTreeD(ctx context.Context, task pollTask) {
if !task.AfterTreeD && task.TaskTreeD == nil && s.pollers[pollerTreeD].IsSet() && task.afterSDR() {
s.pollers[pollerTreeD].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_d = $1 WHERE sp_id = $2 AND sector_number = $3 AND after_sdr = TRUE AND task_id_tree_d IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (t pollTask) afterTreeD() bool {
return t.AfterTreeD && t.afterSDR()
}
func (s *SealPoller) pollStartSDRTreeRC(ctx context.Context, task pollTask) {
if !task.AfterTreeC && !task.AfterTreeR && task.TaskTreeC == nil && task.TaskTreeR == nil && s.pollers[pollerTreeRC].IsSet() && task.afterTreeD() {
s.pollers[pollerTreeRC].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_c = $1, task_id_tree_r = $1
WHERE sp_id = $2 AND sector_number = $3 AND after_tree_d = TRUE AND task_id_tree_c IS NULL AND task_id_tree_r IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (t pollTask) afterTreeRC() bool {
return t.AfterTreeC && t.AfterTreeR && t.afterTreeD()
}
func (t pollTask) afterPrecommitMsg() bool {
return t.AfterPrecommitMsg && t.afterTreeRC()
}
func (t pollTask) afterPrecommitMsgSuccess() bool {
return t.AfterPrecommitMsgSuccess && t.afterPrecommitMsg()
}
func (s *SealPoller) pollStartPoRep(ctx context.Context, task pollTask, ts *types.TipSet) {
if s.pollers[pollerPoRep].IsSet() && task.afterPrecommitMsgSuccess() && task.SeedEpoch != nil &&
task.TaskPoRep == nil && !task.AfterPoRep &&
ts.Height() >= abi.ChainEpoch(*task.SeedEpoch+seedEpochConfidence) {
s.pollers[pollerPoRep].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_porep = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_porep IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (t pollTask) afterPoRep() bool {
return t.AfterPoRep && t.afterPrecommitMsgSuccess()
}
func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *types.TipSet) {
if s.pollers[pollerFinalize].IsSet() && task.afterPoRep() && !task.AfterFinalize && task.TaskFinalize == nil {
s.pollers[pollerFinalize].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_finalize = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_finalize IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (t pollTask) afterFinalize() bool {
return t.AfterFinalize && t.afterPoRep()
}
func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) {
if s.pollers[pollerMoveStorage].IsSet() && task.afterFinalize() && !task.AfterMoveStorage && task.TaskMoveStorage == nil {
s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_move_storage IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (s *SealPoller) mustPoll(err error) {
if err != nil {
log.Errorw("poller operation failed", "error", err)
}
}

View File

@ -1,108 +0,0 @@
package seal
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
func (s *SealPoller) pollStartCommitMsg(ctx context.Context, task pollTask) {
if task.afterPoRep() && len(task.PoRepProof) > 0 && task.TaskCommitMsg == nil && !task.AfterCommitMsg && s.pollers[pollerCommitMsg].IsSet() {
s.pollers[pollerCommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_commit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_commit_msg IS NULL`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
func (s *SealPoller) pollCommitMsgLanded(ctx context.Context, task pollTask) error {
if task.AfterCommitMsg && !task.AfterCommitMsgSuccess && s.pollers[pollerCommitMsg].IsSet() {
var execResult []dbExecResult
err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used
FROM sectors_sdr_pipeline spipeline
JOIN message_waits ON spipeline.commit_msg_cid = message_waits.signed_message_cid
WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber)
if err != nil {
log.Errorw("failed to query message_waits", "error", err)
}
if len(execResult) > 0 {
maddr, err := address.NewIDAddress(uint64(task.SpID))
if err != nil {
return err
}
if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok {
return s.pollCommitMsgFail(ctx, task, execResult[0])
}
si, err := s.api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("get sector info: %w", err)
}
if si == nil {
log.Errorw("todo handle missing sector info (not found after cron)", "sp", task.SpID, "sector", task.SectorNumber, "exec_epoch", execResult[0].ExecutedTskEpoch, "exec_tskcid", execResult[0].ExecutedTskCID, "msg_cid", execResult[0].ExecutedMsgCID)
// todo handle missing sector info (not found after cron)
} else {
// yay!
_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
after_commit_msg_success = TRUE, commit_msg_tsk = $1
WHERE sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`,
execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber)
if err != nil {
return xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
}
}
}
return nil
}
func (s *SealPoller) pollCommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error {
switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) {
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas:
// just retry
return s.pollRetryCommitMsgSend(ctx, task, execResult)
default:
return xerrors.Errorf("commit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode))
}
}
func (s *SealPoller) pollRetryCommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error {
if execResult.CommitMsgCID == nil {
return xerrors.Errorf("commit msg cid was nil")
}
// make the pipeline entry seem like commit send didn't happen, next poll loop will retry
_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
commit_msg_cid = NULL, task_id_commit_msg = NULL, after_commit_msg = FALSE
WHERE commit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`,
*execResult.CommitMsgCID, task.SpID, task.SectorNumber)
if err != nil {
return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err)
}
return nil
}

View File

@ -1,119 +0,0 @@
package seal
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
func (s *SealPoller) pollStartPrecommitMsg(ctx context.Context, task pollTask) {
if task.TaskPrecommitMsg == nil && !task.AfterPrecommitMsg && task.afterTreeRC() && s.pollers[pollerPrecommitMsg].IsSet() {
s.pollers[pollerPrecommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_precommit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_precommit_msg IS NULL AND after_tree_r = TRUE AND after_tree_d = TRUE`, id, task.SpID, task.SectorNumber)
if err != nil {
return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("expected to update 1 row, updated %d", n)
}
return true, nil
})
}
}
type dbExecResult struct {
PrecommitMsgCID *string `db:"precommit_msg_cid"`
CommitMsgCID *string `db:"commit_msg_cid"`
ExecutedTskCID string `db:"executed_tsk_cid"`
ExecutedTskEpoch int64 `db:"executed_tsk_epoch"`
ExecutedMsgCID string `db:"executed_msg_cid"`
ExecutedRcptExitCode int64 `db:"executed_rcpt_exitcode"`
ExecutedRcptGasUsed int64 `db:"executed_rcpt_gas_used"`
}
func (s *SealPoller) pollPrecommitMsgLanded(ctx context.Context, task pollTask) error {
if task.AfterPrecommitMsg && !task.AfterPrecommitMsgSuccess {
var execResult []dbExecResult
err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used
FROM sectors_sdr_pipeline spipeline
JOIN message_waits ON spipeline.precommit_msg_cid = message_waits.signed_message_cid
WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber)
if err != nil {
log.Errorw("failed to query message_waits", "error", err)
}
if len(execResult) > 0 {
if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok {
return s.pollPrecommitMsgFail(ctx, task, execResult[0])
}
maddr, err := address.NewIDAddress(uint64(task.SpID))
if err != nil {
return err
}
pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK)
if err != nil {
return xerrors.Errorf("get precommit info: %w", err)
}
if pci != nil {
randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay()
_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
seed_epoch = $1, precommit_msg_tsk = $2, after_precommit_msg_success = TRUE
WHERE sp_id = $3 AND sector_number = $4 AND seed_epoch IS NULL`,
randHeight, execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber)
if err != nil {
return xerrors.Errorf("update sectors_sdr_pipeline: %w", err)
}
} // todo handle missing precommit info (eg expired precommit)
}
}
return nil
}
func (s *SealPoller) pollPrecommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error {
switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) {
case exitcode.SysErrInsufficientFunds:
fallthrough
case exitcode.SysErrOutOfGas:
// just retry
return s.pollRetryPrecommitMsgSend(ctx, task, execResult)
default:
return xerrors.Errorf("precommit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode))
}
}
func (s *SealPoller) pollRetryPrecommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error {
if execResult.PrecommitMsgCID == nil {
return xerrors.Errorf("precommit msg cid was nil")
}
// make the pipeline entry seem like precommit send didn't happen, next poll loop will retry
_, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET
precommit_msg_cid = NULL, task_id_precommit_msg = NULL, after_precommit_msg = FALSE
WHERE precommit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_precommit_msg_success = FALSE`,
*execResult.PrecommitMsgCID, task.SpID, task.SectorNumber)
if err != nil {
return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err)
}
return nil
}

View File

@ -1,127 +0,0 @@
package seal
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
)
type AllocAPI interface {
StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error)
}
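// AllocateSectorNumbers picks `count` sector numbers that are neither allocated on chain
// nor recorded in the sectors_allocated_numbers table, persists the merged allocation
// bitfield, runs the optional transaction callbacks, and returns the new numbers. The
// whole operation happens inside a single retried harmonydb transaction.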
func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, maddr address.Address, count int, txcb ...func(*harmonydb.Tx, []abi.SectorNumber) (bool, error)) ([]abi.SectorNumber, error) {
chainAlloc, err := a.StateMinerAllocated(ctx, maddr, types.EmptyTSK)
if err != nil {
return nil, xerrors.Errorf("getting on-chain allocated sector numbers: %w", err)
}
mid, err := address.IDFromAddress(maddr)
if err != nil {
return nil, xerrors.Errorf("getting miner id: %w", err)
}
var res []abi.SectorNumber
comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) {
res = nil // reset result in case of retry
// query from db, if it exists unmarshal to bitfield
var dbAllocated bitfield.BitField
var rawJson []byte
err = tx.QueryRow("SELECT COALESCE(allocated, '[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON TRUE WHERE sp_id = $1 OR sp_id IS NULL", mid).Scan(&rawJson)
if err != nil {
return false, xerrors.Errorf("querying allocated sector numbers: %w", err)
}
if rawJson != nil {
err = dbAllocated.UnmarshalJSON(rawJson)
if err != nil {
return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err)
}
}
merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated)
if err != nil {
return false, xerrors.Errorf("merging allocated sector numbers: %w", err)
}
allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{
{
Val: true,
Len: abi.MaxSectorNumber,
},
}})
if err != nil {
return false, xerrors.Errorf("creating assignable sector numbers: %w", err)
}
inverted, err := bitfield.SubtractBitField(allAssignable, merged)
if err != nil {
return false, xerrors.Errorf("subtracting allocated sector numbers: %w", err)
}
toAlloc, err := inverted.Slice(0, uint64(count))
if err != nil {
return false, xerrors.Errorf("getting slice of allocated sector numbers: %w", err)
}
err = toAlloc.ForEach(func(u uint64) error {
res = append(res, abi.SectorNumber(u))
return nil
})
if err != nil {
return false, xerrors.Errorf("iterating allocated sector numbers: %w", err)
}
toPersist, err := bitfield.MergeBitFields(merged, toAlloc)
if err != nil {
return false, xerrors.Errorf("merging allocated sector numbers: %w", err)
}
rawJson, err = toPersist.MarshalJSON()
if err != nil {
return false, xerrors.Errorf("marshaling allocated sector numbers: %w", err)
}
_, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson)
if err != nil {
return false, xerrors.Errorf("persisting allocated sector numbers: %w", err)
}
for i, f := range txcb {
commit, err = f(tx, res)
if err != nil {
return false, xerrors.Errorf("executing tx callback %d: %w", i, err)
}
if !commit {
return false, nil
}
}
return true, nil
}, harmonydb.OptionRetry())
if err != nil {
return nil, xerrors.Errorf("allocating sector numbers: %w", err)
}
if !comm {
return nil, xerrors.Errorf("allocating sector numbers: commit failed")
}
return res, nil
}

View File

@ -1,156 +0,0 @@
package seal
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type FinalizeTask struct {
max int
sp *SealPoller
sc *ffi.SealCalls
db *harmonydb.DB
}
func NewFinalizeTask(max int, sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB) *FinalizeTask {
return &FinalizeTask{
max: max,
sp: sp,
sc: sc,
db: db,
}
}
func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
var tasks []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof int64 `db:"reg_seal_proof"`
}
ctx := context.Background()
err = f.db.Select(ctx, &tasks, `
SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_finalize = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting task: %w", err)
}
if len(tasks) != 1 {
return false, xerrors.Errorf("expected one task")
}
task := tasks[0]
var keepUnsealed bool
if err := f.db.QueryRow(ctx, `SELECT COALESCE(BOOL_OR(NOT data_delete_on_finalize), FALSE) FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, task.SpID, task.SectorNumber).Scan(&keepUnsealed); err != nil {
return false, err
}
sector := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(task.SpID),
Number: abi.SectorNumber(task.SectorNumber),
},
ProofType: abi.RegisteredSealProof(task.RegSealProof),
}
err = f.sc.FinalizeSector(ctx, sector, keepUnsealed)
if err != nil {
return false, xerrors.Errorf("finalizing sector: %w", err)
}
if err := DropSectorPieceRefs(ctx, f.db, sector.ID); err != nil {
return false, xerrors.Errorf("dropping sector piece refs: %w", err)
}
// set after_finalize
_, err = f.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_finalize = TRUE, task_id_finalize = NULL WHERE task_id_finalize = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("updating task: %w", err)
}
return true, nil
}
func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
var tasks []struct {
TaskID harmonytask.TaskID `db:"task_id_finalize"`
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
StorageID string `db:"storage_id"`
}
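// the raw SQL below filters on the literal value 4; make sure it still matches storiface.FTCache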
if storiface.FTCache != 4 {
panic("storiface.FTCache != 4")
}
ctx := context.Background()
indIDs := make([]int64, len(ids))
for i, id := range ids {
indIDs[i] = int64(id)
}
err := f.db.Select(ctx, &tasks, `
SELECT p.task_id_finalize, p.sp_id, p.sector_number, l.storage_id FROM sectors_sdr_pipeline p
INNER JOIN sector_location l ON p.sp_id = l.miner_id AND p.sector_number = l.sector_num
WHERE task_id_finalize = ANY ($1) AND l.sector_filetype = 4
`, indIDs)
if err != nil {
return nil, xerrors.Errorf("getting tasks: %w", err)
}
ls, err := f.sc.LocalStorage(ctx)
if err != nil {
return nil, xerrors.Errorf("getting local storage: %w", err)
}
acceptables := map[harmonytask.TaskID]bool{}
for _, t := range ids {
acceptables[t] = true
}
for _, t := range tasks {
if _, ok := acceptables[t.TaskID]; !ok {
continue
}
for _, l := range ls {
if string(l.ID) == t.StorageID {
return &t.TaskID, nil
}
}
}
return nil, nil
}
func (f *FinalizeTask) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: f.max,
Name: "Finalize",
Cost: resources.Resources{
Cpu: 1,
Gpu: 0,
Ram: 100 << 20,
},
MaxFailures: 10,
}
}
func (f *FinalizeTask) Adder(taskFunc harmonytask.AddTaskFunc) {
f.sp.pollers[pollerFinalize].Set(taskFunc)
}
var _ harmonytask.TaskInterface = &FinalizeTask{}

View File

@ -1,177 +0,0 @@
package seal
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type MoveStorageTask struct {
sp *SealPoller
sc *ffi.SealCalls
db *harmonydb.DB
max int
}
func NewMoveStorageTask(sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask {
return &MoveStorageTask{
max: max,
sp: sp,
sc: sc,
db: db,
}
}
func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
var tasks []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof int64 `db:"reg_seal_proof"`
}
ctx := context.Background()
err = m.db.Select(ctx, &tasks, `
SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting task: %w", err)
}
if len(tasks) != 1 {
return false, xerrors.Errorf("expected 1 task, got %d", len(tasks))
}
task := tasks[0]
sector := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(task.SpID),
Number: abi.SectorNumber(task.SectorNumber),
},
ProofType: abi.RegisteredSealProof(task.RegSealProof),
}
err = m.sc.MoveStorage(ctx, sector, &taskID)
if err != nil {
return false, xerrors.Errorf("moving storage: %w", err)
}
_, err = m.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_move_storage = TRUE, task_id_move_storage = NULL WHERE task_id_move_storage = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("updating task: %w", err)
}
return true, nil
}
func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
ctx := context.Background()
/*
var tasks []struct {
TaskID harmonytask.TaskID `db:"task_id_finalize"`
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
StorageID string `db:"storage_id"`
}
indIDs := make([]int64, len(ids))
for i, id := range ids {
indIDs[i] = int64(id)
}
err := m.db.Select(ctx, &tasks, `
select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p
inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num
where task_id_move_storage in ($1) and l.sector_filetype=4`, indIDs)
if err != nil {
return nil, xerrors.Errorf("getting tasks: %w", err)
}
ls, err := m.sc.LocalStorage(ctx)
if err != nil {
return nil, xerrors.Errorf("getting local storage: %w", err)
}
acceptables := map[harmonytask.TaskID]bool{}
for _, t := range ids {
acceptables[t] = true
}
for _, t := range tasks {
}
todo some smarts
* yield a schedule cycle/s if we have moves already in progress
*/
////
ls, err := m.sc.LocalStorage(ctx)
if err != nil {
return nil, xerrors.Errorf("getting local storage: %w", err)
}
var haveStorage bool
for _, l := range ls {
if l.CanStore {
haveStorage = true
break
}
}
if !haveStorage {
return nil, nil
}
id := ids[0]
return &id, nil
}
func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails {
ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size
if isDevnet {
ssize = abi.SectorSize(2 << 20)
}
return harmonytask.TaskTypeDetails{
Max: m.max,
Name: "MoveStorage",
Cost: resources.Resources{
Cpu: 1,
Gpu: 0,
Ram: 128 << 20,
Storage: m.sc.Storage(m.taskToSector, storiface.FTNone, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed, ssize, storiface.PathStorage, paths.MinFreeStoragePercentage),
},
MaxFailures: 10,
}
}
func (m *MoveStorageTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) {
var refs []ffi.SectorRef
err := m.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, id)
if err != nil {
return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err)
}
if len(refs) != 1 {
return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs))
}
return refs[0], nil
}
func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) {
m.sp.pollers[pollerMoveStorage].Set(taskFunc)
}
var _ harmonytask.TaskInterface = &MoveStorageTask{}

View File

@ -1,177 +0,0 @@
package seal
import (
"bytes"
"context"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type PoRepAPI interface {
ChainHead(context.Context) (*types.TipSet, error)
StateGetRandomnessFromBeacon(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
}
type PoRepTask struct {
db *harmonydb.DB
api PoRepAPI
sp *SealPoller
sc *ffi.SealCalls
max int
}
func NewPoRepTask(db *harmonydb.DB, api PoRepAPI, sp *SealPoller, sc *ffi.SealCalls, maxPoRep int) *PoRepTask {
return &PoRepTask{
db: db,
api: api,
sp: sp,
sc: sc,
max: maxPoRep,
}
}
func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
TicketEpoch abi.ChainEpoch `db:"ticket_epoch"`
TicketValue []byte `db:"ticket_value"`
SeedEpoch abi.ChainEpoch `db:"seed_epoch"`
SealedCID string `db:"tree_r_cid"`
UnsealedCID string `db:"tree_d_cid"`
}
err = p.db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, ticket_value, seed_epoch, tree_r_cid, tree_d_cid
FROM sectors_sdr_pipeline
WHERE task_id_porep = $1`, taskID)
if err != nil {
return false, err
}
if len(sectorParamsArr) != 1 {
return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
}
sectorParams := sectorParamsArr[0]
sealed, err := cid.Parse(sectorParams.SealedCID)
if err != nil {
return false, xerrors.Errorf("failed to parse sealed cid: %w", err)
}
unsealed, err := cid.Parse(sectorParams.UnsealedCID)
if err != nil {
return false, xerrors.Errorf("failed to parse unsealed cid: %w", err)
}
ts, err := p.api.ChainHead(ctx)
if err != nil {
return false, xerrors.Errorf("failed to get chain head: %w", err)
}
maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
if err != nil {
return false, xerrors.Errorf("failed to create miner address: %w", err)
}
buf := new(bytes.Buffer)
if err := maddr.MarshalCBOR(buf); err != nil {
return false, xerrors.Errorf("failed to marshal miner address: %w", err)
}
rand, err := p.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, sectorParams.SeedEpoch, buf.Bytes(), ts.Key())
if err != nil {
return false, xerrors.Errorf("failed to get randomness for computing seal proof: %w", err)
}
sr := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(sectorParams.SpID),
Number: abi.SectorNumber(sectorParams.SectorNumber),
},
ProofType: sectorParams.RegSealProof,
}
// COMPUTE THE PROOF!
proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand))
if err != nil {
//end, rerr := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err)
//if rerr != nil {
// return false, xerrors.Errorf("recover errors: %w", rerr)
//}
//if end {
// // done, but the error handling has stored a different than success state
// return true, nil
//}
return false, xerrors.Errorf("failed to compute seal proof: %w", err)
}
// store success!
n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET after_porep = TRUE, seed_value = $3, porep_proof = $4, task_id_porep = NULL
WHERE sp_id = $1 AND sector_number = $2`,
sectorParams.SpID, sectorParams.SectorNumber, []byte(rand), proof)
if err != nil {
return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("store sdr success: updated %d rows", n)
}
return true, nil
}
func (p *PoRepTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
// todo sort by priority
id := ids[0]
return &id, nil
}
func (p *PoRepTask) TypeDetails() harmonytask.TaskTypeDetails {
gpu := 1.0
if isDevnet {
gpu = 0
}
res := harmonytask.TaskTypeDetails{
Max: p.max,
Name: "PoRep",
Cost: resources.Resources{
Cpu: 1,
Gpu: gpu,
Ram: 50 << 30, // todo correct value
MachineID: 0,
},
MaxFailures: 5,
Follows: nil,
}
if isDevnet {
res.Cost.Ram = 1 << 30
}
return res
}
func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) {
p.sp.pollers[pollerPoRep].Set(taskFunc)
}
var _ harmonytask.TaskInterface = &PoRepTask{}

View File

@ -1,279 +0,0 @@
package seal
import (
"bytes"
"context"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-commp-utils/nonffi"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/filler"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var isDevnet = build.BlockDelaySecs < 30
func SetDevnet(value bool) {
isDevnet = value
}
func GetDevnet() bool {
return isDevnet
}
type SDRAPI interface {
ChainHead(context.Context) (*types.TipSet, error)
StateGetRandomnessFromTickets(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
}
type SDRTask struct {
api SDRAPI
db *harmonydb.DB
sp *SealPoller
sc *ffi.SealCalls
max int
}
func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *ffi.SealCalls, maxSDR int) *SDRTask {
return &SDRTask{
api: api,
db: db,
sp: sp,
sc: sc,
max: maxSDR,
}
}
func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
}
err = s.db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number, reg_seal_proof
FROM sectors_sdr_pipeline
WHERE task_id_sdr = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting sector params: %w", err)
}
if len(sectorParamsArr) != 1 {
return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
}
sectorParams := sectorParamsArr[0]
var pieces []struct {
PieceIndex int64 `db:"piece_index"`
PieceCID string `db:"piece_cid"`
PieceSize int64 `db:"piece_size"`
DataRawSize *int64 `db:"data_raw_size"`
}
err = s.db.Select(ctx, &pieces, `
SELECT piece_index, piece_cid, piece_size, data_raw_size
FROM sectors_sdr_initial_pieces
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
if err != nil {
return false, xerrors.Errorf("getting pieces: %w", err)
}
ssize, err := sectorParams.RegSealProof.SectorSize()
if err != nil {
return false, xerrors.Errorf("getting sector size: %w", err)
}
var commd cid.Cid
var offset abi.UnpaddedPieceSize
var allocated abi.UnpaddedPieceSize
var pieceInfos []abi.PieceInfo
if len(pieces) > 0 {
for _, p := range pieces {
c, err := cid.Parse(p.PieceCID)
if err != nil {
return false, xerrors.Errorf("parsing piece cid: %w", err)
}
allocated += abi.UnpaddedPieceSize(*p.DataRawSize)
pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize))
offset += padLength.Unpadded()
for _, pad := range pads {
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: pad,
PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()),
})
}
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: abi.PaddedPieceSize(p.PieceSize),
PieceCID: c,
})
offset += abi.UnpaddedPieceSize(*p.DataRawSize)
}
fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated)
if err != nil {
return false, xerrors.Errorf("failed to calculate the final padding: %w", err)
}
for _, fil := range fillerSize {
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: fil.Padded(),
PieceCID: zerocomm.ZeroPieceCommitment(fil),
})
}
commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
if err != nil {
return false, xerrors.Errorf("computing CommD: %w", err)
}
} else {
commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
}
sref := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(sectorParams.SpID),
Number: abi.SectorNumber(sectorParams.SectorNumber),
},
ProofType: sectorParams.RegSealProof,
}
// get ticket
maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
if err != nil {
return false, xerrors.Errorf("getting miner address: %w", err)
}
// FAIL: api may be down
// FAIL-RESP: rely on harmony retry
ticket, ticketEpoch, err := s.getTicket(ctx, maddr)
if err != nil {
return false, xerrors.Errorf("getting ticket: %w", err)
}
// do the SDR!!
// FAIL: storage may not have enough space
// FAIL-RESP: rely on harmony retry
// LATEFAIL: compute error in sdr
// LATEFAIL-RESP: Check in Trees task should catch this; Will retry computing
// Trees; After one retry, it should return the sector to the
// SDR stage; max number of retries should be configurable
err = s.sc.GenerateSDR(ctx, taskID, sref, ticket, commd)
if err != nil {
return false, xerrors.Errorf("generating sdr: %w", err)
}
// store success!
n, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET after_sdr = true, ticket_epoch = $3, ticket_value = $4, task_id_sdr = NULL
WHERE sp_id = $1 AND sector_number = $2`,
sectorParams.SpID, sectorParams.SectorNumber, ticketEpoch, []byte(ticket))
if err != nil {
return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("store sdr success: updated %d rows", n)
}
return true, nil
}
func (s *SDRTask) getTicket(ctx context.Context, maddr address.Address) (abi.SealRandomness, abi.ChainEpoch, error) {
ts, err := s.api.ChainHead(ctx)
if err != nil {
return nil, 0, xerrors.Errorf("getting chain head: %w", err)
}
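// take the ticket from SealRandomnessLookback epochs behind the head so shallow reorgs don't invalidate it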
ticketEpoch := ts.Height() - policy.SealRandomnessLookback
buf := new(bytes.Buffer)
if err := maddr.MarshalCBOR(buf); err != nil {
return nil, 0, xerrors.Errorf("marshaling miner address: %w", err)
}
rand, err := s.api.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes(), ts.Key())
if err != nil {
return nil, 0, xerrors.Errorf("getting randomness from tickets: %w", err)
}
return abi.SealRandomness(rand), ticketEpoch, nil
}
func (s *SDRTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
id := ids[0]
return &id, nil
}
func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails {
ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size
if isDevnet {
ssize = abi.SectorSize(2 << 20)
}
res := harmonytask.TaskTypeDetails{
Max: s.max,
Name: "SDR",
Cost: resources.Resources{ // todo offset for prefetch?
Cpu: 4, // todo multicore sdr
Gpu: 0,
Ram: 54 << 30,
Storage: s.sc.Storage(s.taskToSector, storiface.FTCache, storiface.FTNone, ssize, storiface.PathSealing, paths.MinFreeStoragePercentage),
},
MaxFailures: 2,
Follows: nil,
}
if isDevnet {
res.Cost.Ram = 1 << 30
res.Cost.Cpu = 1
}
return res
}
func (s *SDRTask) Adder(taskFunc harmonytask.AddTaskFunc) {
s.sp.pollers[pollerSDR].Set(taskFunc)
}
func (s *SDRTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) {
var refs []ffi.SectorRef
err := s.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_sdr = $1`, id)
if err != nil {
return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err)
}
if len(refs) != 1 {
return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs))
}
return refs[0], nil
}
var _ harmonytask.TaskInterface = &SDRTask{}
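Aside: SDRTask.Do above (and TreeDTask.Do further down) share the same CommD construction. Below is a compact sketch with hypothetical sizes, pulled out of the DB-driven loop; commDForPieces and the 1 KiB stand-in piece are illustrative only.
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-commp-utils/nonffi"
	"github.com/filecoin-project/go-commp-utils/zerocomm"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/lib/filler"
	"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
)

// commDForPieces lays out pieces with the required inter-piece padding, fills the rest of
// the sector with zero pieces, and computes the unsealed CID over the resulting list.
func commDForPieces(spt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
	ssize, err := spt.SectorSize()
	if err != nil {
		return cid.Undef, err
	}
	var infos []abi.PieceInfo
	var offset, allocated abi.UnpaddedPieceSize
	for _, p := range pieces {
		allocated += p.Size.Unpadded()
		pads, padLen := ffiwrapper.GetRequiredPadding(offset.Padded(), p.Size)
		offset += padLen.Unpadded()
		for _, pad := range pads {
			infos = append(infos, abi.PieceInfo{Size: pad, PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded())})
		}
		infos = append(infos, p)
		offset += p.Size.Unpadded()
	}
	fillers, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated)
	if err != nil {
		return cid.Undef, err
	}
	for _, fil := range fillers {
		infos = append(infos, abi.PieceInfo{Size: fil.Padded(), PieceCID: zerocomm.ZeroPieceCommitment(fil)})
	}
	return nonffi.GenerateUnsealedCID(spt, infos)
}

func main() {
	// one 1 KiB (padded) piece in a 2 KiB sector; a real piece would carry its own commP,
	// here a zero commitment stands in for it
	piece := abi.PieceInfo{
		Size:     abi.PaddedPieceSize(1 << 10),
		PieceCID: zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(1 << 10).Unpadded()),
	}
	commd, err := commDForPieces(abi.RegisteredSealProof_StackedDrg2KiBV1_1, []abi.PieceInfo{piece})
	fmt.Println(commd, err)
}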

View File

@ -1,423 +0,0 @@
package seal
import (
"bytes"
"context"
"encoding/json"
"fmt"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/curiosrc/message"
"github.com/filecoin-project/lotus/curiosrc/multictladdr"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
type SubmitCommitAPI interface {
ChainHead(context.Context) (*types.TipSet, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error)
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error)
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes9.AllocationId, tsk types.TipSetKey) (*verifregtypes9.Allocation, error)
StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifregtypes9.AllocationId, error)
ctladdr.NodeApi
}
type commitConfig struct {
maxFee types.FIL
RequireActivationSuccess bool
RequireNotificationSuccess bool
}
type SubmitCommitTask struct {
sp *SealPoller
db *harmonydb.DB
api SubmitCommitAPI
sender *message.Sender
as *multictladdr.MultiAddressSelector
cfg commitConfig
}
func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *message.Sender, as *multictladdr.MultiAddressSelector, cfg *config.CurioConfig) *SubmitCommitTask {
cnfg := commitConfig{
maxFee: cfg.Fees.MaxCommitGasFee,
RequireActivationSuccess: cfg.Subsystems.RequireActivationSuccess,
RequireNotificationSuccess: cfg.Subsystems.RequireNotificationSuccess,
}
return &SubmitCommitTask{
sp: sp,
db: db,
api: api,
sender: sender,
as: as,
cfg: cnfg,
}
}
func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
Proof []byte `db:"porep_proof"`
}
err = s.db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number, porep_proof
FROM sectors_sdr_pipeline
WHERE task_id_commit_msg = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting sector params: %w", err)
}
if len(sectorParamsArr) != 1 {
return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
}
sectorParams := sectorParamsArr[0]
var pieces []struct {
PieceIndex int64 `db:"piece_index"`
PieceCID string `db:"piece_cid"`
PieceSize int64 `db:"piece_size"`
Proposal json.RawMessage `db:"f05_deal_proposal"`
Manifest json.RawMessage `db:"direct_piece_activation_manifest"`
DealID abi.DealID `db:"f05_deal_id"`
}
err = s.db.Select(ctx, &pieces, `
SELECT piece_index,
piece_cid,
piece_size,
f05_deal_proposal,
direct_piece_activation_manifest,
COALESCE(f05_deal_id, 0) AS f05_deal_id
FROM sectors_sdr_initial_pieces
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
if err != nil {
return false, xerrors.Errorf("getting pieces: %w", err)
}
maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
if err != nil {
return false, xerrors.Errorf("getting miner address: %w", err)
}
ts, err := s.api.ChainHead(ctx)
if err != nil {
return false, xerrors.Errorf("getting chain head: %w", err)
}
pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(sectorParams.SectorNumber), ts.Key())
if err != nil {
return false, xerrors.Errorf("getting precommit info: %w", err)
}
if pci == nil {
return false, xerrors.Errorf("precommit info not found on chain")
}
mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting miner info: %w", err)
}
params := miner.ProveCommitSectors3Params{
RequireActivationSuccess: s.cfg.RequireActivationSuccess,
RequireNotificationSuccess: s.cfg.RequireNotificationSuccess,
}
var pams []miner.PieceActivationManifest
for _, piece := range pieces {
if piece.Proposal != nil {
var prop *market.DealProposal
err = json.Unmarshal(piece.Proposal, &prop)
if err != nil {
return false, xerrors.Errorf("marshalling json to deal proposal: %w", err)
}
alloc, err := s.api.StateGetAllocationIdForPendingDeal(ctx, piece.DealID, types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting allocation for deal %d: %w", piece.DealID, err)
}
clid, err := s.api.StateLookupID(ctx, prop.Client, types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err)
}
clientId, err := address.IDFromAddress(clid)
if err != nil {
return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err)
}
var vac *miner2.VerifiedAllocationKey
if alloc != verifregtypes9.NoAllocationID {
vac = &miner2.VerifiedAllocationKey{
Client: abi.ActorID(clientId),
ID: verifreg13.AllocationId(alloc),
}
}
payload, err := cborutil.Dump(piece.DealID)
if err != nil {
return false, xerrors.Errorf("serializing deal id: %w", err)
}
pams = append(pams, miner.PieceActivationManifest{
CID: prop.PieceCID,
Size: prop.PieceSize,
VerifiedAllocationKey: vac,
Notify: []miner2.DataActivationNotification{
{
Address: market.Address,
Payload: payload,
},
},
})
} else {
var pam *miner.PieceActivationManifest
err = json.Unmarshal(piece.Manifest, &pam)
if err != nil {
return false, xerrors.Errorf("marshalling json to PieceManifest: %w", err)
}
err = s.allocationCheck(ctx, pam, pci, abi.ActorID(sectorParams.SpID), ts)
if err != nil {
return false, err
}
pams = append(pams, *pam)
}
}
params.SectorActivations = append(params.SectorActivations, miner.SectorActivationManifest{
SectorNumber: abi.SectorNumber(sectorParams.SectorNumber),
Pieces: pams,
})
params.SectorProofs = append(params.SectorProofs, sectorParams.Proof)
enc := new(bytes.Buffer)
if err := params.MarshalCBOR(enc); err != nil {
return false, xerrors.Errorf("could not serialize commit params: %w", err)
}
collateral, err := s.api.StateMinerInitialPledgeCollateral(ctx, maddr, pci.Info, ts.Key())
if err != nil {
return false, xerrors.Errorf("getting initial pledge collateral: %w", err)
}
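// the precommit deposit is already locked up, so only the difference to the full initial pledge is sent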
collateral = big.Sub(collateral, pci.PreCommitDeposit)
if collateral.LessThan(big.Zero()) {
collateral = big.Zero()
}
a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.CommitAddr, collateral, big.Zero())
if err != nil {
return false, xerrors.Errorf("getting address for precommit: %w", err)
}
msg := &types.Message{
To: maddr,
From: a,
Method: builtin.MethodsMiner.ProveCommitSectors3,
Params: enc.Bytes(),
Value: collateral, // todo config for pulling from miner balance!!
}
mss := &api.MessageSendSpec{
MaxFee: abi.TokenAmount(s.cfg.maxFee),
}
mcid, err := s.sender.Send(ctx, msg, mss, "commit")
if err != nil {
return false, xerrors.Errorf("pushing message to mpool: %w", err)
}
_, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET commit_msg_cid = $1, after_commit_msg = TRUE, task_id_commit_msg = NULL WHERE sp_id = $2 AND sector_number = $3`, mcid, sectorParams.SpID, sectorParams.SectorNumber)
if err != nil {
return false, xerrors.Errorf("updating commit_msg_cid: %w", err)
}
_, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid)
if err != nil {
return false, xerrors.Errorf("inserting into message_waits: %w", err)
}
if err := s.transferFinalizedSectorData(ctx, sectorParams.SpID, sectorParams.SectorNumber); err != nil {
return false, xerrors.Errorf("transferring finalized sector data: %w", err)
}
return true, nil
}
func (s *SubmitCommitTask) transferFinalizedSectorData(ctx context.Context, spID, sectorNum int64) error {
if _, err := s.db.Exec(ctx, `
INSERT INTO sectors_meta (
sp_id,
sector_num,
reg_seal_proof,
ticket_epoch,
ticket_value,
orig_sealed_cid,
orig_unsealed_cid,
cur_sealed_cid,
cur_unsealed_cid,
msg_cid_precommit,
msg_cid_commit,
seed_epoch,
seed_value
)
SELECT
sp_id,
sector_number as sector_num,
reg_seal_proof,
ticket_epoch,
ticket_value,
tree_r_cid as orig_sealed_cid,
tree_d_cid as orig_unsealed_cid,
tree_r_cid as cur_sealed_cid,
tree_d_cid as cur_unsealed_cid,
precommit_msg_cid,
commit_msg_cid,
seed_epoch,
seed_value
FROM
sectors_sdr_pipeline
WHERE
sp_id = $1 AND
sector_number = $2
ON CONFLICT (sp_id, sector_num) DO UPDATE SET
reg_seal_proof = excluded.reg_seal_proof,
ticket_epoch = excluded.ticket_epoch,
ticket_value = excluded.ticket_value,
orig_sealed_cid = excluded.orig_sealed_cid,
cur_sealed_cid = excluded.cur_sealed_cid,
msg_cid_precommit = excluded.msg_cid_precommit,
msg_cid_commit = excluded.msg_cid_commit,
seed_epoch = excluded.seed_epoch,
seed_value = excluded.seed_value;
`, spID, sectorNum); err != nil {
return fmt.Errorf("failed to insert/update sectors_meta: %w", err)
}
// Execute the query for piece metadata
if _, err := s.db.Exec(ctx, `
INSERT INTO sectors_meta_pieces (
sp_id,
sector_num,
piece_num,
piece_cid,
piece_size,
requested_keep_data,
raw_data_size,
start_epoch,
orig_end_epoch,
f05_deal_id,
ddo_pam
)
SELECT
sp_id,
sector_number AS sector_num,
piece_index AS piece_num,
piece_cid,
piece_size,
not data_delete_on_finalize as requested_keep_data,
data_raw_size,
COALESCE(f05_deal_start_epoch, direct_start_epoch) as start_epoch,
COALESCE(f05_deal_end_epoch, direct_end_epoch) as orig_end_epoch,
f05_deal_id,
direct_piece_activation_manifest as ddo_pam
FROM
sectors_sdr_initial_pieces
WHERE
sp_id = $1 AND
sector_number = $2
ON CONFLICT (sp_id, sector_num, piece_num) DO UPDATE SET
piece_cid = excluded.piece_cid,
piece_size = excluded.piece_size,
requested_keep_data = excluded.requested_keep_data,
raw_data_size = excluded.raw_data_size,
start_epoch = excluded.start_epoch,
orig_end_epoch = excluded.orig_end_epoch,
f05_deal_id = excluded.f05_deal_id,
ddo_pam = excluded.ddo_pam;
`, spID, sectorNum); err != nil {
return fmt.Errorf("failed to insert/update sector_meta_pieces: %w", err)
}
return nil
}
func (s *SubmitCommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
id := ids[0]
return &id, nil
}
func (s *SubmitCommitTask) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: 128,
Name: "CommitSubmit",
Cost: resources.Resources{
Cpu: 0,
Gpu: 0,
Ram: 1 << 20,
},
MaxFailures: 16,
}
}
func (s *SubmitCommitTask) Adder(taskFunc harmonytask.AddTaskFunc) {
s.sp.pollers[pollerCommitMsg].Set(taskFunc)
}
func (s *SubmitCommitTask) allocationCheck(ctx context.Context, piece *miner.PieceActivationManifest, precommitInfo *miner.SectorPreCommitOnChainInfo, miner abi.ActorID, ts *types.TipSet) error {
// skip pieces not claiming an allocation
if piece.VerifiedAllocationKey == nil {
return nil
}
addr, err := address.NewIDAddress(uint64(piece.VerifiedAllocationKey.Client))
if err != nil {
return err
}
alloc, err := s.api.StateGetAllocation(ctx, addr, verifregtypes9.AllocationId(piece.VerifiedAllocationKey.ID), ts.Key())
if err != nil {
return err
}
if alloc == nil {
return xerrors.Errorf("no allocation found for piece %s with allocation ID %d", piece.CID.String(), piece.VerifiedAllocationKey.ID)
}
if alloc.Provider != miner {
return xerrors.Errorf("provider id mismatch for piece %s: expected %d and found %d", piece.CID.String(), miner, alloc.Provider)
}
if alloc.Size != piece.Size {
return xerrors.Errorf("size mismatch for piece %s: expected %d and found %d", piece.CID.String(), piece.Size, alloc.Size)
}
if precommitInfo.Info.Expiration < ts.Height()+alloc.TermMin {
return xerrors.Errorf("sector expiration %d is earlier than the allocation's minimum term end %d for piece %s", precommitInfo.Info.Expiration, ts.Height()+alloc.TermMin, piece.CID.String())
}
if precommitInfo.Info.Expiration > ts.Height()+alloc.TermMax {
return xerrors.Errorf("sector expiration %d is later than the allocation's maximum term end %d for piece %s", precommitInfo.Info.Expiration, ts.Height()+alloc.TermMax, piece.CID.String())
}
}
return nil
}
var _ harmonytask.TaskInterface = &SubmitCommitTask{}

View File

@ -1,297 +0,0 @@
package seal
import (
"bytes"
"context"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/curiosrc/message"
"github.com/filecoin-project/lotus/curiosrc/multictladdr"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/ctladdr"
)
type SubmitPrecommitTaskApi interface {
ChainHead(context.Context) (*types.TipSet, error)
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
ctladdr.NodeApi
}
type SubmitPrecommitTask struct {
sp *SealPoller
db *harmonydb.DB
api SubmitPrecommitTaskApi
sender *message.Sender
as *multictladdr.MultiAddressSelector
maxFee types.FIL
}
func NewSubmitPrecommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitPrecommitTaskApi, sender *message.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitPrecommitTask {
return &SubmitPrecommitTask{
sp: sp,
db: db,
api: api,
sender: sender,
as: as,
maxFee: maxFee,
}
}
func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
// 1. Load sector info
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
TicketEpoch abi.ChainEpoch `db:"ticket_epoch"`
SealedCID string `db:"tree_r_cid"`
UnsealedCID string `db:"tree_d_cid"`
}
err = s.db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, tree_r_cid, tree_d_cid
FROM sectors_sdr_pipeline
WHERE task_id_precommit_msg = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting sector params: %w", err)
}
if len(sectorParamsArr) != 1 {
return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
}
sectorParams := sectorParamsArr[0]
maddr, err := address.NewIDAddress(uint64(sectorParams.SpID))
if err != nil {
return false, xerrors.Errorf("getting miner address: %w", err)
}
sealedCID, err := cid.Parse(sectorParams.SealedCID)
if err != nil {
return false, xerrors.Errorf("parsing sealed CID: %w", err)
}
unsealedCID, err := cid.Parse(sectorParams.UnsealedCID)
if err != nil {
return false, xerrors.Errorf("parsing unsealed CID: %w", err)
}
// 2. Prepare message params
head, err := s.api.ChainHead(ctx)
if err != nil {
return false, xerrors.Errorf("getting chain head: %w", err)
}
params := miner.PreCommitSectorBatchParams2{}
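// start from the longest expiration the ticket allows; deal end epochs below may extend it, and the minimum-expiration check further down raises it if it is still too short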
expiration := sectorParams.TicketEpoch + miner12.MaxSectorExpirationExtension
params.Sectors = append(params.Sectors, miner.SectorPreCommitInfo{
SealProof: sectorParams.RegSealProof,
SectorNumber: abi.SectorNumber(sectorParams.SectorNumber),
SealedCID: sealedCID,
SealRandEpoch: sectorParams.TicketEpoch,
Expiration: expiration,
})
{
var pieces []struct {
PieceIndex int64 `db:"piece_index"`
PieceCID string `db:"piece_cid"`
PieceSize int64 `db:"piece_size"`
DealStartEpoch int64 `db:"deal_start_epoch"`
DealEndEpoch int64 `db:"deal_end_epoch"`
}
err = s.db.Select(ctx, &pieces, `
SELECT piece_index,
piece_cid,
piece_size,
COALESCE(f05_deal_end_epoch, direct_end_epoch, 0) AS deal_end_epoch,
COALESCE(f05_deal_start_epoch, direct_start_epoch, 0) AS deal_start_epoch
FROM sectors_sdr_initial_pieces
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
if err != nil {
return false, xerrors.Errorf("getting pieces: %w", err)
}
if len(pieces) > 0 {
params.Sectors[0].UnsealedCid = &unsealedCID
for _, p := range pieces {
if p.DealStartEpoch > 0 && abi.ChainEpoch(p.DealStartEpoch) < head.Height() {
// deal start epoch is in the past, can't precommit this sector anymore
_, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET failed = TRUE, failed_at = NOW(), failed_reason = 'past-start-epoch', failed_reason_msg = 'precommit: start epoch is in the past', task_id_precommit_msg = NULL
WHERE task_id_precommit_msg = $1`, taskID)
if perr != nil {
return false, xerrors.Errorf("persisting precommit start epoch expiry: %w", perr)
}
return true, xerrors.Errorf("deal start epoch is in the past")
}
if p.DealEndEpoch > 0 && abi.ChainEpoch(p.DealEndEpoch) > params.Sectors[0].Expiration {
params.Sectors[0].Expiration = abi.ChainEpoch(p.DealEndEpoch)
}
}
}
}
nv, err := s.api.StateNetworkVersion(ctx, types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting network version: %w", err)
}
av, err := actorstypes.VersionForNetwork(nv)
if err != nil {
return false, xerrors.Errorf("failed to get actors version: %w", err)
}
msd, err := policy.GetMaxProveCommitDuration(av, sectorParams.RegSealProof)
if err != nil {
return false, xerrors.Errorf("failed to get max prove commit duration: %w", err)
}
if minExpiration := sectorParams.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; params.Sectors[0].Expiration < minExpiration {
params.Sectors[0].Expiration = minExpiration
}
// 3. Check precommit
{
record, err := s.checkPrecommit(ctx, params)
if err != nil {
if record {
_, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET failed = TRUE, failed_at = NOW(), failed_reason = 'precommit-check', failed_reason_msg = $1, task_id_precommit_msg = NULL
WHERE task_id_precommit_msg = $2`, err.Error(), taskID)
if perr != nil {
return false, xerrors.Errorf("persisting precommit check error: %w", perr)
}
}
return record, xerrors.Errorf("checking precommit: %w", err)
}
}
// 4. Prepare and send message
var pbuf bytes.Buffer
if err := params.MarshalCBOR(&pbuf); err != nil {
return false, xerrors.Errorf("serializing params: %w", err)
}
collateral, err := s.api.StateMinerPreCommitDepositForPower(ctx, maddr, params.Sectors[0], types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting precommit deposit: %w", err)
}
mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return false, xerrors.Errorf("getting miner info: %w", err)
}
a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.PreCommitAddr, collateral, big.Zero())
if err != nil {
return false, xerrors.Errorf("getting address for precommit: %w", err)
}
msg := &types.Message{
To: maddr,
From: a,
Method: builtin.MethodsMiner.PreCommitSectorBatch2,
Params: pbuf.Bytes(),
Value: collateral, // todo config for pulling from miner balance!!
}
mss := &api.MessageSendSpec{
MaxFee: abi.TokenAmount(s.maxFee),
}
mcid, err := s.sender.Send(ctx, msg, mss, "precommit")
if err != nil {
return false, xerrors.Errorf("sending message: %w", err)
}
// set precommit_msg_cid
_, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET precommit_msg_cid = $1, after_precommit_msg = TRUE, task_id_precommit_msg = NULL
WHERE task_id_precommit_msg = $2`, mcid, taskID)
if err != nil {
return false, xerrors.Errorf("updating precommit_msg_cid: %w", err)
}
_, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid)
if err != nil {
return false, xerrors.Errorf("inserting into message_waits: %w", err)
}
return true, nil
}
func (s *SubmitPrecommitTask) checkPrecommit(ctx context.Context, params miner.PreCommitSectorBatchParams2) (record bool, err error) {
if len(params.Sectors) != 1 {
return false, xerrors.Errorf("expected 1 sector")
}
preCommitInfo := params.Sectors[0]
head, err := s.api.ChainHead(ctx)
if err != nil {
return false, xerrors.Errorf("getting chain head: %w", err)
}
height := head.Height()
// no precommit message has been sent for this sector yet, so check that the ticket has not already expired
ticketEarliest := height - policy.MaxPreCommitRandomnessLookback
if preCommitInfo.SealRandEpoch < ticketEarliest {
return true, xerrors.Errorf("ticket expired: seal height: %d, head: %d", preCommitInfo.SealRandEpoch+policy.SealRandomnessLookback, height)
}
return true, nil
}
func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
id := ids[0]
return &id, nil
}
func (s *SubmitPrecommitTask) TypeDetails() harmonytask.TaskTypeDetails {
return harmonytask.TaskTypeDetails{
Max: 1024,
Name: "PreCommitSubmit",
Cost: resources.Resources{
Cpu: 0,
Gpu: 0,
Ram: 1 << 20,
},
MaxFailures: 16,
}
}
func (s *SubmitPrecommitTask) Adder(taskFunc harmonytask.AddTaskFunc) {
s.sp.pollers[pollerPrecommitMsg].Set(taskFunc)
}
var _ harmonytask.TaskInterface = &SubmitPrecommitTask{}

View File

@ -1,366 +0,0 @@
package seal
import (
"context"
"io"
"net/http"
"net/url"
"strconv"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-commp-utils/nonffi"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/go-padreader"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/curiosrc/ffi"
"github.com/filecoin-project/lotus/curiosrc/harmony/harmonytask"
"github.com/filecoin-project/lotus/curiosrc/harmony/resources"
"github.com/filecoin-project/lotus/lib/filler"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type TreeDTask struct {
sp *SealPoller
db *harmonydb.DB
sc *ffi.SealCalls
max int
}
func (t *TreeDTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) {
if isDevnet {
return &ids[0], nil
}
if engine.Resources().Gpu > 0 {
return &ids[0], nil
}
return nil, nil
}
func (t *TreeDTask) TypeDetails() harmonytask.TaskTypeDetails {
ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size
if isDevnet {
ssize = abi.SectorSize(2 << 20)
}
return harmonytask.TaskTypeDetails{
Max: t.max,
Name: "TreeD",
Cost: resources.Resources{
Cpu: 1,
Ram: 1 << 30,
Gpu: 0,
Storage: t.sc.Storage(t.taskToSector, storiface.FTNone, storiface.FTCache, ssize, storiface.PathSealing, 1.0),
},
MaxFailures: 3,
Follows: nil,
}
}
func (t *TreeDTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) {
var refs []ffi.SectorRef
err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_tree_d = $1`, id)
if err != nil {
return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err)
}
if len(refs) != 1 {
return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs))
}
return refs[0], nil
}
func (t *TreeDTask) Adder(taskFunc harmonytask.AddTaskFunc) {
t.sp.pollers[pollerTreeD].Set(taskFunc)
}
func NewTreeDTask(sp *SealPoller, db *harmonydb.DB, sc *ffi.SealCalls, maxTrees int) *TreeDTask {
return &TreeDTask{
sp: sp,
db: db,
sc: sc,
max: maxTrees,
}
}
func (t *TreeDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) {
ctx := context.Background()
var sectorParamsArr []struct {
SpID int64 `db:"sp_id"`
SectorNumber int64 `db:"sector_number"`
RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"`
}
err = t.db.Select(ctx, &sectorParamsArr, `
SELECT sp_id, sector_number, reg_seal_proof
FROM sectors_sdr_pipeline
WHERE task_id_tree_d = $1`, taskID)
if err != nil {
return false, xerrors.Errorf("getting sector params: %w", err)
}
if len(sectorParamsArr) != 1 {
return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr))
}
sectorParams := sectorParamsArr[0]
sref := storiface.SectorRef{
ID: abi.SectorID{
Miner: abi.ActorID(sectorParams.SpID),
Number: abi.SectorNumber(sectorParams.SectorNumber),
},
ProofType: sectorParams.RegSealProof,
}
// Fetch the Sector to local storage
fsPaths, pathIds, release, err := t.sc.PreFetch(ctx, sref, &taskID)
if err != nil {
return false, xerrors.Errorf("failed to prefetch sectors: %w", err)
}
defer release()
var pieces []struct {
PieceIndex int64 `db:"piece_index"`
PieceCID string `db:"piece_cid"`
PieceSize int64 `db:"piece_size"`
DataUrl *string `db:"data_url"`
DataHeaders *[]byte `db:"data_headers"`
DataRawSize *int64 `db:"data_raw_size"`
}
err = t.db.Select(ctx, &pieces, `
SELECT piece_index, piece_cid, piece_size, data_url, data_headers, data_raw_size
FROM sectors_sdr_initial_pieces
WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber)
if err != nil {
return false, xerrors.Errorf("getting pieces: %w", err)
}
ssize, err := sectorParams.RegSealProof.SectorSize()
if err != nil {
return false, xerrors.Errorf("getting sector size: %w", err)
}
var commd cid.Cid
var dataReader io.Reader
var unpaddedData bool
var closers []io.Closer
defer func() {
for _, c := range closers {
if err := c.Close(); err != nil {
log.Errorw("error closing piece reader", "error", err)
}
}
}()
if len(pieces) > 0 {
var pieceInfos []abi.PieceInfo
var pieceReaders []io.Reader
var offset abi.UnpaddedPieceSize
var allocated abi.UnpaddedPieceSize
for _, p := range pieces {
// make pieceInfo
c, err := cid.Parse(p.PieceCID)
if err != nil {
return false, xerrors.Errorf("parsing piece cid: %w", err)
}
allocated += abi.UnpaddedPieceSize(*p.DataRawSize)
pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize))
offset += padLength.Unpadded()
for _, pad := range pads {
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: pad,
PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()),
})
pieceReaders = append(pieceReaders, nullreader.NewNullReader(pad.Unpadded()))
}
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: abi.PaddedPieceSize(p.PieceSize),
PieceCID: c,
})
offset += abi.UnpaddedPieceSize(*p.DataRawSize)
// make pieceReader
if p.DataUrl != nil {
dataUrl := *p.DataUrl
goUrl, err := url.Parse(dataUrl)
if err != nil {
return false, xerrors.Errorf("parsing data URL: %w", err)
}
if goUrl.Scheme == "pieceref" {
// url is to a piece reference
refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64)
if err != nil {
return false, xerrors.Errorf("parsing piece reference number: %w", err)
}
// get pieceID
var pieceID []struct {
PieceID storiface.PieceNumber `db:"piece_id"`
}
err = t.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum)
if err != nil {
return false, xerrors.Errorf("getting pieceID: %w", err)
}
if len(pieceID) != 1 {
return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID))
}
pr, err := t.sc.PieceReader(ctx, pieceID[0].PieceID)
if err != nil {
return false, xerrors.Errorf("getting piece reader: %w", err)
}
closers = append(closers, pr)
reader, _ := padreader.New(pr, uint64(*p.DataRawSize))
pieceReaders = append(pieceReaders, reader)
} else {
reader, _ := padreader.New(&UrlPieceReader{
Url: dataUrl,
RawSize: *p.DataRawSize,
}, uint64(*p.DataRawSize))
pieceReaders = append(pieceReaders, reader)
}
} else { // padding piece (w/o fr32 padding, added in TreeD)
pieceReaders = append(pieceReaders, nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded()))
}
}
fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated)
if err != nil {
return false, xerrors.Errorf("failed to calculate the final padding: %w", err)
}
for _, fil := range fillerSize {
pieceInfos = append(pieceInfos, abi.PieceInfo{
Size: fil.Padded(),
PieceCID: zerocomm.ZeroPieceCommitment(fil),
})
pieceReaders = append(pieceReaders, nullreader.NewNullReader(fil))
}
commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos)
if err != nil {
return false, xerrors.Errorf("computing CommD: %w", err)
}
dataReader = io.MultiReader(pieceReaders...)
unpaddedData = true
} else {
commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded())
dataReader = nullreader.NewNullReader(abi.UnpaddedPieceSize(ssize))
unpaddedData = false // nullreader includes fr32 zero bits
}
// Generate Tree D
err = t.sc.TreeD(ctx, sref, commd, abi.PaddedPieceSize(ssize), dataReader, unpaddedData, fsPaths, pathIds)
if err != nil {
return false, xerrors.Errorf("failed to generate TreeD: %w", err)
}
n, err := t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET after_tree_d = true, tree_d_cid = $3, task_id_tree_d = NULL WHERE sp_id = $1 AND sector_number = $2`,
sectorParams.SpID, sectorParams.SectorNumber, commd)
if err != nil {
return false, xerrors.Errorf("store TreeD success: updating pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("store TreeD success: updated %d rows", n)
}
return true, nil
}
type UrlPieceReader struct {
Url string
RawSize int64 // the exact number of bytes read, if we read more or less that's an error
readSoFar int64
closed bool
active io.ReadCloser // auto-closed on EOF
}
func (u *UrlPieceReader) Read(p []byte) (n int, err error) {
// Check if we have already read the required amount of data
if u.readSoFar >= u.RawSize {
return 0, io.EOF
}
// If 'active' is nil, initiate the HTTP request
if u.active == nil {
resp, err := http.Get(u.Url)
if err != nil {
return 0, err
}
// Set 'active' to the response body
u.active = resp.Body
}
// Calculate the maximum number of bytes we can read without exceeding RawSize
toRead := u.RawSize - u.readSoFar
if int64(len(p)) > toRead {
p = p[:toRead]
}
n, err = u.active.Read(p)
// Update the number of bytes read so far
u.readSoFar += int64(n)
// If the number of bytes read exceeds RawSize, return an error
if u.readSoFar > u.RawSize {
return n, xerrors.New("read beyond the specified RawSize")
}
// If EOF is reached, close the reader
if err == io.EOF {
cerr := u.active.Close()
u.closed = true
if cerr != nil {
log.Errorf("error closing http piece reader: %s", cerr)
}
// if we're below the RawSize, return an unexpected EOF error
if u.readSoFar < u.RawSize {
log.Errorw("unexpected EOF", "readSoFar", u.readSoFar, "rawSize", u.RawSize, "url", u.Url)
return n, io.ErrUnexpectedEOF
}
}
return n, err
}
func (u *UrlPieceReader) Close() error {
if !u.closed {
u.closed = true
if u.active == nil {
// Read was never called, or the HTTP request failed before a body was obtained
return nil
}
return u.active.Close()
}
return nil
}
var _ harmonytask.TaskInterface = &TreeDTask{}
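Aside: a minimal usage sketch for UrlPieceReader, assuming it sits in the same package as the code above (the URL and raw size are made up). padreader zero-extends the exactly-RawSize HTTP body up to a valid unpadded piece size, which is how TreeDTask.Do feeds remote pieces into TreeD.
// readRemotePiece streams rawSize bytes from url, zero-padded to the unpadded piece size,
// and discards them; it returns the number of bytes copied.
func readRemotePiece(url string, rawSize int64) (int64, error) {
	src := &UrlPieceReader{Url: url, RawSize: rawSize}
	defer src.Close() // safe: Close tolerates a never-started or already-closed body

	padded, _ := padreader.New(src, uint64(rawSize))
	return io.Copy(io.Discard, padded) // copies rawSize bytes plus the trailing zero padding
}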

Some files were not shown because too many files have changed in this diff