Merge pull request #1469 from filecoin-project/feat/extract-sectorstorage

Extract sector-storage

commit 34e9e9fe34
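The change is almost entirely mechanical: the storage/sectorstorage tree moves out of the lotus module into its own repository, github.com/filecoin-project/sector-storage, and every importer is rewritten accordingly. A minimal sketch of the rename as it repeats through the hunks below (the blank imports exist only so this fragment compiles on its own):

package main

// Before the extraction, consumers imported the vendored tree:
//   "github.com/filecoin-project/lotus/storage/sectorstorage"
//   "github.com/filecoin-project/lotus/storage/sectorstorage/stores"
// After it, the same packages come from the standalone module:
import (
	_ "github.com/filecoin-project/sector-storage"        // blank imports: this sketch
	_ "github.com/filecoin-project/sector-storage/stores" // only demonstrates the paths
)

func main() {}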
@@ -11,8 +11,8 @@ import (
 	"github.com/filecoin-project/specs-actors/actors/abi"
 
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 // alias because cbor-gen doesn't like non-alias types
@@ -6,9 +6,9 @@ import (
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/sealtasks"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 type WorkerApi interface {
@@ -21,9 +21,9 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/sealtasks"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 // All permissions are listed in permissioned.go
@@ -37,7 +37,7 @@ import (
 	"github.com/filecoin-project/lotus/genesis"
 	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/node/repo"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 )
 
 var log = logging.Logger("gen")
@@ -44,7 +44,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/lib/sigs"
 	"github.com/filecoin-project/lotus/metrics"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 )
 
 var log = logging.Logger("chain")
@@ -13,7 +13,7 @@ import (
 	"github.com/filecoin-project/specs-actors/actors/crypto"
 	"github.com/filecoin-project/specs-actors/actors/runtime"
 
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 )
 
 func init() {
@@ -27,8 +27,8 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/genesis"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
 )
 
 var log = logging.Logger("lotus-bench")
@@ -3,7 +3,7 @@ package main
 import (
 	"context"
 	"encoding/json"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io/ioutil"
 	"net"
 	"net/http"
@@ -26,9 +26,9 @@ import (
 	"github.com/filecoin-project/lotus/lib/jsonrpc"
 	"github.com/filecoin-project/lotus/lib/lotuslog"
 	"github.com/filecoin-project/lotus/node/repo"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/sealtasks"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 var log = logging.Logger("main")
@@ -6,7 +6,7 @@ import (
 	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 )
 
 type worker struct {
@@ -5,7 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/docker/go-units"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io/ioutil"
 	"os"
 
@@ -24,9 +24,9 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/genesis"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 var log = logging.Logger("preseal")
@@ -42,9 +42,9 @@ import (
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage"
 	"github.com/filecoin-project/lotus/storage/sealing"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 var initCmd = &cli.Command{
@@ -20,7 +20,7 @@ import (
 
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 const metaFile = "sectorstore.json"
@@ -3,7 +3,7 @@ package main
 import (
 	"fmt"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 	"gopkg.in/urfave/cli.v2"
 	"sort"
 
@@ -35,7 +35,7 @@ import (
 	"github.com/filecoin-project/lotus/node/modules"
 	"github.com/filecoin-project/lotus/node/modules/testing"
 	"github.com/filecoin-project/lotus/node/repo"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 )
 
 const (
go.mod (17 changes)
@@ -8,13 +8,11 @@ require (
 	github.com/BurntSushi/toml v0.3.1
 	github.com/GeertJohan/go.rice v1.0.0
 	github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee
-	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
 	github.com/coreos/go-systemd/v22 v22.0.0
 	github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f // indirect
 	github.com/docker/go-units v0.4.0
-	github.com/elastic/go-sysinfo v1.3.0
 	github.com/filecoin-project/chain-validation v0.0.6-0.20200325210556-5a3014759d9c
-	github.com/filecoin-project/filecoin-ffi v0.0.0-20200304181354-4446ff8a1bb9
+	github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072
 	github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be
 	github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e
 	github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
@@ -26,12 +24,12 @@ require (
 	github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663
 	github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9
 	github.com/filecoin-project/go-statestore v0.1.0
+	github.com/filecoin-project/sector-storage v0.0.0-20200327233338-c4b928d44163
 	github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1
-	github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217
+	github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
-	github.com/go-ole/go-ole v1.2.4 // indirect
 	github.com/google/uuid v1.1.1
-	github.com/gorilla/mux v1.7.3
+	github.com/gorilla/mux v1.7.4
 	github.com/gorilla/websocket v1.4.1
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/hashicorp/golang-lru v0.5.4
@@ -52,12 +50,12 @@ require (
 	github.com/ipfs/go-ipfs-ds-help v0.1.1
 	github.com/ipfs/go-ipfs-exchange-interface v0.0.1
 	github.com/ipfs/go-ipfs-exchange-offline v0.0.1
-	github.com/ipfs/go-ipfs-files v0.0.4
+	github.com/ipfs/go-ipfs-files v0.0.7
 	github.com/ipfs/go-ipfs-routing v0.1.0
 	github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669
 	github.com/ipfs/go-ipld-format v0.0.2
-	github.com/ipfs/go-log v1.0.2
-	github.com/ipfs/go-log/v2 v2.0.2
+	github.com/ipfs/go-log v1.0.3
+	github.com/ipfs/go-log/v2 v2.0.3
 	github.com/ipfs/go-merkledag v0.2.4
 	github.com/ipfs/go-path v0.0.7
 	github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb
@@ -114,7 +112,6 @@ require (
 	gopkg.in/urfave/cli.v2 v2.0.0-20180128182452-d3ae77c26ac8
 	gotest.tools v2.2.0+incompatible
 	honnef.co/go/tools v0.0.1-2020.1.3 // indirect
-	launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
 )
 
 replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v1.18.0
go.sum (66 changes)
@@ -69,6 +69,8 @@ github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
+github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
 github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
 github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
 github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=
@@ -103,13 +105,20 @@ github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmet
 github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
 github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=
 github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
+github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
+github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
+github.com/filecoin-project/chain-validation v0.0.3/go.mod h1:NCEGFjcWRjb8akWFSOXvU6n2efkWIqAeOKU6o5WBGQw=
 github.com/filecoin-project/chain-validation v0.0.6-0.20200325210556-5a3014759d9c h1:I5xX6g6ySpRm9v6j3B5ML1OgeZDnAY/ppftDjdP6OMc=
 github.com/filecoin-project/chain-validation v0.0.6-0.20200325210556-5a3014759d9c/go.mod h1:mXiAviXMZ2WVGmWNtjGr0JPMpNCNsPU774DawKZCzzM=
+github.com/filecoin-project/go-address v0.0.0-20191219011437-af739c490b4f/go.mod h1:rCbpXPva2NKF9/J4X6sr7hbKBgQCxyFtRj7KOZqoIms=
 github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
 github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E=
 github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
+github.com/filecoin-project/go-amt-ipld v0.0.0-20191205011053-79efc22d6cdc h1:cODZD2YzpTUtrOSxbEnWFcQHidNRZiRdvLxySjGvG/M=
+github.com/filecoin-project/go-amt-ipld v0.0.0-20191205011053-79efc22d6cdc/go.mod h1:KsFPWjF+UUYl6n9A+qbg4bjFgAOneicFZtDH/LQEX2U=
 github.com/filecoin-project/go-amt-ipld/v2 v2.0.0/go.mod h1:PAZ5tvSfMfWE327osqFXKm7cBpCpBk2Nh0qKsJUmjjk=
 github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e h1:IOoff6yAZSJ5zHCPY2jzGNwQYQU6ygsRVe/cSnJrY+o=
 github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
 github.com/filecoin-project/go-bitfield v0.0.0-20200309034705-8c7ac40bd550 h1:aockulLU8Qjkdj4FQz53WQpNosAIYk8DxRediRLkE5c=
@@ -122,25 +131,33 @@ github.com/filecoin-project/go-data-transfer v0.0.0-20191219005021-4accf56bd2ce
 github.com/filecoin-project/go-data-transfer v0.0.0-20191219005021-4accf56bd2ce/go.mod h1:b14UWxhxVCAjrQUYvVGrQRRsjAh79wXYejw9RbUcAww=
 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo=
 github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA=
 github.com/filecoin-project/go-fil-markets v0.0.0-20200114015428-74d100f305f8/go.mod h1:c8NTjvFVy1Ud02mmGDjOiMeawY2t6ALfrrdvAB01FQc=
 github.com/filecoin-project/go-fil-markets v0.0.0-20200318012938-6403a5bda668 h1:856ZUIBb2K8+C5nepxi4FQ/yeTSWdr4mWbjs1JbByGU=
 github.com/filecoin-project/go-fil-markets v0.0.0-20200318012938-6403a5bda668/go.mod h1:7EGCMycMpwICVzckXUfNL44HfIxG7pwoAVeOuZFGX/4=
 github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs=
 github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE=
 github.com/filecoin-project/go-paramfetch v0.0.0-20200102181131-b20d579f2878/go.mod h1:40kI2Gv16mwcRsHptI3OAV4nlOEU7wVDc4RgMylNFjU=
+github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA=
 github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
+github.com/filecoin-project/go-sectorbuilder v0.0.1/go.mod h1:3OZ4E3B2OuwhJjtxR4r7hPU9bCfB+A+hm4alLEsaeDc=
 github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200203173614-42d67726bb62/go.mod h1:jNGVCDihkMFnraYVLH1xl4ceZQVxx/u4dOORrTKeRi0=
+github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200311224656-7d83652bdbed h1:4Wn3XEpFOctHFdtPuKWr6ejbxaC9rivWjSp7qw/sOZ0=
+github.com/filecoin-project/go-sectorbuilder v0.0.2-0.20200311224656-7d83652bdbed/go.mod h1:xAd/X905Ncgj8kkHsP2pmQUf6MQT2qJTDcOEfkwCjYc=
 github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9 h1:k9qVR9ItcziSB2rxtlkN/MDWNlbsI6yzec+zjUatLW0=
 github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
 github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
 github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
+github.com/filecoin-project/lotus v0.2.10/go.mod h1:om5PQA9ZT0lf16qI7Fz/ZGLn4LDCMqPC8ntZA9uncRE=
+github.com/filecoin-project/sector-storage v0.0.0-20200327233338-c4b928d44163 h1:ZCvy0S7RI96OYRqMslHFi75vsD/DRYcY3yjRvdawC08=
+github.com/filecoin-project/sector-storage v0.0.0-20200327233338-c4b928d44163/go.mod h1:yT100eeKHGO9xU3rfkeM2/8NcBktxe2nBkDT4WmD1lA=
 github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA=
 github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU=
 github.com/filecoin-project/specs-actors v0.0.0-20200302223606-0eaf97b10aaf/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU=
 github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1 h1:IL6A1yAamz0HtLQEdZS57hnRZHPL11VIrQxMZ1Nn5hI=
 github.com/filecoin-project/specs-actors v0.0.0-20200324235424-aef9b20a9fb1/go.mod h1:5WngRgTN5Eo4+0SjCBqLzEr2l6Mj45DrP2606gBhqI0=
-github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217 h1:doPA79fSLg5TnY2rJhXs5dIZHP3IoCcIiCLKFGfgrY8=
 github.com/filecoin-project/specs-storage v0.0.0-20200317133846-063ba163b217/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk=
+github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38 h1:ky+rfX3bG1TjOBLn14V674q+iwZpalyKzZxGRNzA11I=
+github.com/filecoin-project/specs-storage v0.0.0-20200317225704-7420bc655c38/go.mod h1:dUmzHS7izOD6HW3/JpzFrjxnptxbsHXBlO8puK2UzBk=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0=
@@ -189,11 +206,14 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
 github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
 github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
+github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE=
 github.com/hannahhoward/cbor-gen-for v0.0.0-20191216214420-3e450425c40c/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k=
 github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 h1:vQqOW42RRM5LoM/1K5dK940VipLqpH8lEVGrMz+mNjU=
 github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k=
@@ -209,6 +229,7 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag=
 github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
 github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
 github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
@@ -231,6 +252,8 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR
 github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M=
 github.com/ipfs/go-blockservice v0.1.3-0.20190908200855-f22eea50656c h1:lN5IQA07VtLiTLAp/Scezp1ljFhXErC6yq4O1cu+yJ0=
 github.com/ipfs/go-blockservice v0.1.3-0.20190908200855-f22eea50656c/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I=
+github.com/ipfs/go-car v0.0.3-0.20191203022317-23b0a85fd1b1/go.mod h1:rmd887mJxQRDfndfDEY3Liyx8gQVyfFFRSHdsnDSAlk=
+github.com/ipfs/go-car v0.0.3-0.20200121013634-f188c0e24291/go.mod h1:AG6sBpd2PWMccpAG7XLFBBQ/4rfBEtzUNeO2GSMesYk=
 github.com/ipfs/go-car v0.0.3-0.20200304012825-b6769248bfef h1:Zn2PZSkX8Go+SZpQmjVKNrkcgbNuIxUC/3MOQRDTIVw=
 github.com/ipfs/go-car v0.0.3-0.20200304012825-b6769248bfef/go.mod h1:7BMxYRi5cbR/GJ1A8mYSHvMLXLkHgYdrJ6VlNGobd0o=
 github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
@@ -253,10 +276,13 @@ github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ
 github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
 github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8=
 github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s=
+github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk=
+github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE=
 github.com/ipfs/go-ds-badger2 v0.0.0-20200123200730-d75eb2678a5d/go.mod h1:sTQFaWUoW0OvhXzfHnQ9j39L6fdlqDkptDYcpC1XrYE=
 github.com/ipfs/go-ds-badger2 v0.0.0-20200211201106-609c9d2a39c7 h1:2P493YpV0SsG9c0btHfZt9eZCO+tzLAelQyrwQQcey0=
 github.com/ipfs/go-ds-badger2 v0.0.0-20200211201106-609c9d2a39c7/go.mod h1:d/QTAGj3T4lF4CuFpywNnAQ0RbffuDc1BtGFAvuYWls=
 github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc=
 github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8=
+github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s=
 github.com/ipfs/go-filestore v0.0.2 h1:pcYwpjtXXwirtbjBXKVJM9CTa9F7/8v1EkfnDaHTO3s=
 github.com/ipfs/go-filestore v0.0.2/go.mod h1:KnZ41qJsCt2OX2mxZS0xsK3Psr0/oB93HMMssLujjVc=
@@ -264,11 +290,13 @@ github.com/ipfs/go-fs-lock v0.0.1 h1:XHX8uW4jQBYWHj59XXcjg7BHlHxV9ZOYs6Y43yb7/l0
 github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y=
 github.com/ipfs/go-graphsync v0.0.4 h1:iF98+J8pcqvEb48IM0TemqeGARsCDtwQ73P9ejMZIuU=
 github.com/ipfs/go-graphsync v0.0.4/go.mod h1:6UACBjfOXEa8rQL3Q/JpZpWS0nZDCLx134WUkjrmFpQ=
+github.com/ipfs/go-hamt-ipld v0.0.14-0.20191218031521-b2c774a54db1/go.mod h1:8yRx0xLUps1Xq8ZDnIwIVdQRp7JjA55gGvCiRHT91Vk=
 github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE=
 github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242 h1:OYVGeYkGSRZdBJ35JHPXQ9deQxlLtJ3Ln0FuaJOu6x8=
 github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg=
 github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08=
 github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw=
 github.com/ipfs/go-ipfs-blockstore v0.1.1/go.mod h1:8gZOgIN5e+Xdg2YSGdwTTRbguSVjYyosIDRQCY8E9QM=
+github.com/ipfs/go-ipfs-blockstore v0.1.3/go.mod h1:iNWVBoSQ7eMcaGo8+L3pKZABGTdWcqj1/hpoUu5bDps=
 github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ=
 github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ=
@@ -289,6 +317,9 @@ github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAz
+github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
 github.com/ipfs/go-ipfs-files v0.0.4 h1:WzRCivcybUQch/Qh6v8LBRhKtRsjnwyiuOV09mK7mrE=
 github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
+github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0=
+github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
 github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA=
 github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
 github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A=
 github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU=
@@ -311,9 +342,13 @@ github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSI
 github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I=
 github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0=
 github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
+github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI=
+github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A=
 github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
 github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw=
 github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
+github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc=
+github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
 github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto=
 github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
 github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk=
@@ -335,12 +370,14 @@ github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2
 github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
 github.com/ipld/go-car v0.0.5-0.20200316204026-3e2cf7af0fab h1:+3Y6Jb3IBmG3t6e3r6TItnuciOaMOuGW7QIVEUa5vy4=
 github.com/ipld/go-car v0.0.5-0.20200316204026-3e2cf7af0fab/go.mod h1:yR5AsJ38xTwwgwGpbh60ICtdLPp5lGfuH28PAAzaEhM=
+github.com/ipld/go-ipld-prime v0.0.1/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w=
 github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785 h1:fASnkvtR+SmB2y453RxmDD3Uvd4LonVUgFGk9JoDaZs=
 github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w=
 github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5 h1:lSip43rAdyGA+yRQuy6ju0ucZkWpYc1F2CTQtZTVW/4=
 github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU=
 github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
 github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
+github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
 github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=
 github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA=
 github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
@@ -405,6 +442,7 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68
 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8=
 github.com/libp2p/go-libp2p v0.2.1/go.mod h1:HZbtEOrgZN4F1fGZVvkV+930Wx3DkqlpBlO8dIoZWds=
+github.com/libp2p/go-libp2p v0.3.0/go.mod h1:J7DPB1+zB5VLc8v/kKSD8+u2cbyIGI0Dh/Pf3Wprt+0=
 github.com/libp2p/go-libp2p v0.4.2/go.mod h1:MNmgUxUw5pMsdOzMlT0EE7oKjRasl+WyVwM0IBlpKgQ=
 github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM=
 github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg=
 github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE=
@@ -481,6 +519,7 @@ github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4T
 github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY=
 github.com/libp2p/go-libp2p-peerstore v0.1.2/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
+github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI=
 github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs=
 github.com/libp2p/go-libp2p-peerstore v0.2.0 h1:XcgJhI8WyUOCbHyRLNEX5542YNj8hnLSJ2G1InRjDhk=
 github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ=
 github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k=
@@ -522,6 +561,7 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq
 github.com/libp2p/go-libp2p-tls v0.1.0 h1:o4bjjAdnUjNgJoPoDd0wUaZH7K+EenlNWJpgyXB3ulA=
 github.com/libp2p/go-libp2p-tls v0.1.0/go.mod h1:VZdoSWQDeNpIIAFJFv+6uqTqpnIIDHcqZQSTC/A1TT0=
 github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk=
 github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A=
+github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A=
 github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc=
 github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
@@ -571,6 +611,7 @@ github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1Wb
 github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
 github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU=
 github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo=
+github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y=
 github.com/libp2p/go-ws-transport v0.2.0 h1:MJCw2OrPA9+76YNRvdo1wMnSOxb9Bivj6sVFY1Xrj6w=
 github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM=
 github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow=
@@ -774,8 +815,12 @@ github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboa
 github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM=
 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY=
 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM=
 github.com/whyrusleeping/cbor-gen v0.0.0-20190910031516-c1cbffdb01bb/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
+github.com/whyrusleeping/cbor-gen v0.0.0-20190917003517-d78d67427694/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
 github.com/whyrusleeping/cbor-gen v0.0.0-20191116002219-891f55cd449d/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
+github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
+github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
+github.com/whyrusleeping/cbor-gen v0.0.0-20200121162646-b63bacf5eaf8/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
 github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
 github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
 github.com/whyrusleeping/cbor-gen v0.0.0-20200321164527-9340289d0ca7 h1:SVU2yhhHHamTPIMT9kk28KSYdO3ykTZeIp5p+6G9qNk=
@@ -787,6 +832,9 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h
 github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
 github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE=
 github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8=
+github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4=
+github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ=
+github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI=
 github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=
 github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA=
 github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
@@ -797,6 +845,7 @@ github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d h1:wnjWu1N8UT
 github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A=
 github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
 github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
+github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8=
 github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -809,25 +858,32 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/dig v1.7.0/go.mod h1:z+dSd2TP9Usi48jL8M3v63iSBVkiwtVyMKxMZYYauPg=
 go.uber.org/dig v1.8.0 h1:1rR6hnL/bu1EVcjnRDN5kx1vbIjEJDTGhSQ2B3ddpcI=
 go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
 go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY=
 go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw=
+go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
 go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo=
 go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
 go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
 go4.org v0.0.0-20190218023631-ce4c26f7be8e h1:m9LfARr2VIOW0vsV19kEKp/sWQvZnGobA8JHui/XJoY=
 go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+go4.org v0.0.0-20190313082347-94abd6928b1d h1:JkRdGP3zvTtTbabWSAC6n67ka30y7gOzWAah4XYJSfw=
+go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -853,12 +909,14 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -910,9 +968,11 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44=
@@ -938,8 +998,10 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk=
 golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
@@ -13,8 +13,8 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/storage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 )
 
 type retrievalProviderNode struct {
@@ -59,9 +59,9 @@ import (
 	"github.com/filecoin-project/lotus/storage"
 	"github.com/filecoin-project/lotus/storage/sealing"
 	"github.com/filecoin-project/lotus/storage/sectorblocks"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 var log = logging.Logger("builder")
@@ -2,7 +2,7 @@ package config
 
 import (
 	"encoding"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 	"time"
 )
 
@@ -2,7 +2,7 @@ package config
 
 import (
 	"encoding/json"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/stores"
 	"io"
 	"io/ioutil"
 	"os"
@@ -3,7 +3,7 @@ package client
 import (
 	"context"
 	"errors"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io"
 	"os"
 
@@ -12,7 +12,7 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/api/client"
 	"github.com/filecoin-project/lotus/lib/jsonrpc"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 )
 
 type remoteWorker struct {
@@ -21,9 +21,9 @@ import (
 	"github.com/filecoin-project/lotus/node/impl/common"
 	"github.com/filecoin-project/lotus/storage"
 	"github.com/filecoin-project/lotus/storage/sectorblocks"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 type StorageMinerAPI struct {
@@ -49,9 +49,9 @@ import (
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage"
 	"github.com/filecoin-project/lotus/storage/sealing"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
@@ -6,7 +6,7 @@ import (
 	"crypto/rand"
 	"github.com/filecoin-project/lotus/lib/lotuslog"
 	"github.com/filecoin-project/lotus/storage/mockstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io/ioutil"
 	"net/http/httptest"
 	"testing"
@@ -43,8 +43,8 @@ import (
 	"github.com/filecoin-project/lotus/node/modules"
 	modtest "github.com/filecoin-project/lotus/node/modules/testing"
 	"github.com/filecoin-project/lotus/node/repo"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/mock"
+	"github.com/filecoin-project/sector-storage"
+	"github.com/filecoin-project/sector-storage/mock"
 )
 
 func init() {
@@ -3,7 +3,7 @@ package repo
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/stores"
 	"io"
 	"io/ioutil"
 	"os"
@@ -2,7 +2,7 @@ package repo
 
 import (
 	"errors"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/stores"
 
 	"github.com/ipfs/go-datastore"
 	"github.com/multiformats/go-multiaddr"
@@ -16,7 +16,7 @@ import (
 
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/node/config"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
+	"github.com/filecoin-project/sector-storage/stores"
 )
 
 type MemRepo struct {
@@ -14,7 +14,7 @@ import (
 	"github.com/libp2p/go-libp2p-core/host"
 	"golang.org/x/xerrors"
 
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
@@ -3,7 +3,7 @@ package mockstorage
 import (
 	"github.com/filecoin-project/go-address"
 	commcid "github.com/filecoin-project/go-fil-commcid"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/mock"
+	"github.com/filecoin-project/sector-storage/mock"
 	"github.com/filecoin-project/specs-actors/actors/abi"
 	"github.com/filecoin-project/specs-actors/actors/abi/big"
 	"github.com/filecoin-project/specs-actors/actors/builtin/market"
@@ -12,8 +12,8 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/wallet"
 	"github.com/filecoin-project/lotus/genesis"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/zerocomm"
 )
 
 func PreSeal(ssize abi.SectorSize, maddr address.Address, sectors int) (*genesis.Miner, *types.KeyInfo, error) {
@@ -15,7 +15,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm"
+	"github.com/filecoin-project/sector-storage/zerocomm"
 )
 
 // TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting
@@ -2,7 +2,7 @@ package sealing
 
 import (
 	"context"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io"
 
 	"golang.org/x/xerrors"
@@ -2,7 +2,7 @@ package sealing
 
 import (
 	"context"
-	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
+	"github.com/filecoin-project/sector-storage/ffiwrapper"
 	"io"
 
 	"github.com/filecoin-project/go-address"
@@ -22,7 +22,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/events"
 	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/storage/sectorstorage"
+	"github.com/filecoin-project/sector-storage"
 )
 
 const SectorStorePrefix = "/sectors"
@@ -1,76 +0,0 @@
-package basicfs
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"sync"
-
-	"github.com/filecoin-project/specs-actors/actors/abi"
-
-	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
-)
-
-type sectorFile struct {
-	abi.SectorID
-	stores.SectorFileType
-}
-
-type Provider struct {
-	Root string
-
-	lk         sync.Mutex
-	waitSector map[sectorFile]chan struct{}
-}
-
-func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) {
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) {
-		return stores.SectorPaths{}, nil, err
-	}
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755); err != nil && !os.IsExist(err) {
-		return stores.SectorPaths{}, nil, err
-	}
-	if err := os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755); err != nil && !os.IsExist(err) {
-		return stores.SectorPaths{}, nil, err
-	}
-
-	done := func() {}
-
-	out := stores.SectorPaths{
-		Id: id,
-	}
-
-	for _, fileType := range stores.PathTypes {
-		if !existing.Has(fileType) && !allocate.Has(fileType) {
-			continue
-		}
-
-		b.lk.Lock()
-		if b.waitSector == nil {
-			b.waitSector = map[sectorFile]chan struct{}{}
-		}
-		ch, found := b.waitSector[sectorFile{id, fileType}]
-		if !found {
-			ch = make(chan struct{}, 1)
-			b.waitSector[sectorFile{id, fileType}] = ch
-		}
-		b.lk.Unlock()
-
-		select {
-		case ch <- struct{}{}:
-		case <-ctx.Done():
-			done()
-			return stores.SectorPaths{}, nil, ctx.Err()
-		}
-
-		prevDone := done
-		done = func() {
-			prevDone()
-			<-ch
-		}
-
-		stores.SetPathByType(&out, fileType, filepath.Join(b.Root, fileType.String(), stores.SectorName(id)))
-	}
-
-	return out, done, nil
-}
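The removed basicfs.Provider survives in the extracted module (lotus-bench and lotus-seed now import it from there); per sector file it takes a buffered-channel slot as a lock and stacks the releases into the returned done closure. A hedged sketch of how it plugs into the sealer constructor that appears later in this diff, assuming the extracted repo keeps the API shown here; the 2KiB proof types are the development ones from specs-actors:

package main

import (
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/sector-storage/ffiwrapper"
	"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
)

// newBenchSealer wires the filesystem-backed path provider into a Sealer.
func newBenchSealer(root string) (*ffiwrapper.Sealer, error) {
	sbfs := &basicfs.Provider{Root: root} // sector files land under root/<type>/<sector-name>
	cfg := &ffiwrapper.Config{
		SealProofType: abi.RegisteredProof_StackedDRG2KiBSeal,
		PoStProofType: abi.RegisteredProof_StackedDRG2KiBPoSt,
	}
	return ffiwrapper.New(sbfs, cfg) // fails if the two proof types imply different sector sizes
}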
@@ -1,72 +0,0 @@
-package ffiwrapper
-
-import (
-	"fmt"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/specs-actors/actors/abi"
-)
-
-type Config struct {
-	SealProofType abi.RegisteredProof
-	PoStProofType abi.RegisteredProof
-
-	_ struct{} // guard against nameless init
-}
-
-func sizeFromConfig(cfg Config) (abi.SectorSize, error) {
-	if cfg.SealProofType == abi.RegisteredProof(0) {
-		return abi.SectorSize(0), xerrors.New("must specify a seal proof type from abi.RegisteredProof")
-	}
-
-	if cfg.PoStProofType == abi.RegisteredProof(0) {
-		return abi.SectorSize(0), xerrors.New("must specify a PoSt proof type from abi.RegisteredProof")
-	}
-
-	s1, err := SectorSizeForRegisteredProof(cfg.SealProofType)
-	if err != nil {
-		return abi.SectorSize(0), err
-	}
-
-	s2, err := SectorSizeForRegisteredProof(cfg.PoStProofType)
-	if err != nil {
-		return abi.SectorSize(0), err
-	}
-
-	if s1 != s2 {
-		return abi.SectorSize(0), xerrors.Errorf("seal sector size %d does not equal PoSt sector size %d", s1, s2)
-	}
-
-	return s1, nil
-}
-
-// TODO: remove this method after implementing it along side the registered proofs and importing it from there.
-func SectorSizeForRegisteredProof(p abi.RegisteredProof) (abi.SectorSize, error) {
-	switch p {
-	case abi.RegisteredProof_StackedDRG32GiBSeal, abi.RegisteredProof_StackedDRG32GiBPoSt:
-		return 32 << 30, nil
-	case abi.RegisteredProof_StackedDRG2KiBSeal, abi.RegisteredProof_StackedDRG2KiBPoSt:
-		return 2 << 10, nil
-	case abi.RegisteredProof_StackedDRG8MiBSeal, abi.RegisteredProof_StackedDRG8MiBPoSt:
-		return 8 << 20, nil
-	case abi.RegisteredProof_StackedDRG512MiBSeal, abi.RegisteredProof_StackedDRG512MiBPoSt:
-		return 512 << 20, nil
-	default:
-		return 0, fmt.Errorf("unsupported registered proof %d", p)
-	}
-}
-
-func ProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredProof, abi.RegisteredProof, error) {
-	switch ssize {
-	case 2 << 10:
-		return abi.RegisteredProof_StackedDRG2KiBPoSt, abi.RegisteredProof_StackedDRG2KiBSeal, nil
-	case 8 << 20:
-		return abi.RegisteredProof_StackedDRG8MiBPoSt, abi.RegisteredProof_StackedDRG8MiBSeal, nil
-	case 512 << 20:
-		return abi.RegisteredProof_StackedDRG512MiBPoSt, abi.RegisteredProof_StackedDRG512MiBSeal, nil
-	case 32 << 30:
-		return abi.RegisteredProof_StackedDRG32GiBPoSt, abi.RegisteredProof_StackedDRG32GiBSeal, nil
-	default:
-		return 0, 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
-	}
-}
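A quick round-trip check of the two helpers above (a sketch assuming they keep their exported names in the extracted module): mapping a sector size to its proof pair and each proof back to a size must agree, which is exactly the invariant sizeFromConfig enforces.

package main

import (
	"fmt"

	"github.com/filecoin-project/sector-storage/ffiwrapper"
)

func main() {
	// 512 MiB = 512 << 20; the function returns the PoSt proof first, then the seal proof.
	ppt, spt, err := ffiwrapper.ProofTypeFromSectorSize(512 << 20)
	if err != nil {
		panic(err)
	}
	s1, _ := ffiwrapper.SectorSizeForRegisteredProof(spt)
	s2, _ := ffiwrapper.SectorSizeForRegisteredProof(ppt)
	fmt.Println(s1, s2) // both print 536870912
}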
@ -1,53 +0,0 @@
package ffiwrapper

import (
	"io"
	"os"
	"sync"

	"golang.org/x/xerrors"
)

func toReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
	f, ok := r.(*os.File)
	if ok {
		return f, func() error { return nil }, nil
	}

	var w *os.File

	f, w, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}

	var wait sync.Mutex
	var werr error

	wait.Lock()
	go func() {
		defer wait.Unlock()

		var copied int64
		copied, werr = io.CopyN(w, r, n)
		if werr != nil {
			log.Warnf("toReadableFile: copy error: %+v", werr)
		}

		err := w.Close()
		if werr == nil && err != nil {
			werr = err
			log.Warnf("toReadableFile: close error: %+v", err)
			return
		}
		if copied != n {
			log.Warnf("copied different amount than expected: %d != %d", copied, n)
			werr = xerrors.Errorf("copied different amount than expected: %d != %d", copied, n)
		}
	}()

	return f, func() error {
		wait.Lock()
		return werr
	}, nil
}
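toReadableFile bridges an arbitrary io.Reader to an *os.File via os.Pipe, because the FFI layer needs a real file descriptor. A self-contained sketch of the same pattern (not from the diff; it uses a channel for completion instead of the mutex, and the names pipeToFile and errc are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// pipeToFile mirrors toReadableFile's fallback path: when the reader is not
// already an *os.File, copy exactly n bytes through an os.Pipe so callers
// that require a file descriptor can still consume the data.
func pipeToFile(r io.Reader, n int64) (*os.File, func() error, error) {
	pr, pw, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}

	errc := make(chan error, 1)
	go func() {
		_, werr := io.CopyN(pw, r, n)
		if cerr := pw.Close(); werr == nil {
			werr = cerr
		}
		errc <- werr
	}()

	// The returned func blocks until the copy goroutine finishes, like the
	// mutex-based wait in the original.
	return pr, func() error { return <-errc }, nil
}

func main() {
	f, wait, _ := pipeToFile(bytes.NewReader([]byte("hello")), 5)
	defer f.Close()
	b, _ := ioutil.ReadAll(f)
	fmt.Printf("%s\n", b) // hello
	fmt.Println(wait())   // <nil>
}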
@ -1,18 +0,0 @@
package ffiwrapper

// /////
// Proofs

// 1 / n
const SectorChallengeRatioDiv = 25

const MaxFallbackPostChallengeCount = 10

// extracted from lotus/chain/types/blockheader
func ElectionPostChallengeCount(sectors uint64, faults uint64) uint64 {
	if sectors-faults == 0 {
		return 0
	}
	// ceil((sectors - faults) / SectorChallengeRatioDiv)
	return (sectors-faults-1)/SectorChallengeRatioDiv + 1
}
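The function computes a ceiling division over the non-faulty sector count using the (x-1)/d + 1 identity, which holds for integer x > 0. A small worked sketch (not from the diff; challengeCount is an illustrative standalone copy):

package main

import "fmt"

// Ceiling division via (x-1)/d + 1, as in ElectionPostChallengeCount.
func challengeCount(sectors, faults uint64) uint64 {
	const div = 25 // SectorChallengeRatioDiv
	if sectors-faults == 0 {
		return 0
	}
	return (sectors-faults-1)/div + 1
}

func main() {
	fmt.Println(challengeCount(25, 0))  // 1  (25/25 exactly)
	fmt.Println(challengeCount(26, 0))  // 2  (rounds up)
	fmt.Println(challengeCount(100, 4)) // 4  (ceil(96/25))
}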
@ -1,41 +0,0 @@
package ffiwrapper

import (
	"github.com/filecoin-project/specs-actors/actors/abi"
	logging "github.com/ipfs/go-log/v2"
)

var log = logging.Logger("ffiwrapper")

type Sealer struct {
	sealProofType abi.RegisteredProof
	postProofType abi.RegisteredProof
	ssize         abi.SectorSize // a function of sealProofType and postProofType

	sectors  SectorProvider
	stopping chan struct{}
}

func fallbackPostChallengeCount(sectors uint64, faults uint64) uint64 {
	challengeCount := ElectionPostChallengeCount(sectors, faults)
	if challengeCount > MaxFallbackPostChallengeCount {
		return MaxFallbackPostChallengeCount
	}
	return challengeCount
}

func (sb *Sealer) Stop() {
	close(sb.stopping)
}

func (sb *Sealer) SectorSize() abi.SectorSize {
	return sb.ssize
}

func (sb *Sealer) SealProofType() abi.RegisteredProof {
	return sb.sealProofType
}

func (sb *Sealer) PoStProofType() abi.RegisteredProof {
	return sb.postProofType
}
@ -1,340 +0,0 @@
//+build cgo

package ffiwrapper

import (
	"context"
	"io"
	"math/bits"
	"os"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
	"github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm"
)

var _ Storage = &Sealer{}

func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
	sectorSize, err := sizeFromConfig(*cfg)
	if err != nil {
		return nil, err
	}

	sb := &Sealer{
		sealProofType: cfg.SealProofType,
		postProofType: cfg.PoStProofType,
		ssize:         sectorSize,

		sectors: sectors,

		stopping: make(chan struct{}),
	}

	return sb, nil
}

func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error {
	// TODO: Allocate the sector here instead of in addpiece

	return nil
}

func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
	f, werr, err := toReadableFile(file, int64(pieceSize))
	if err != nil {
		return abi.PieceInfo{}, err
	}

	var done func()
	var stagedFile *os.File

	defer func() {
		if done != nil {
			done()
		}

		if stagedFile != nil {
			if err := stagedFile.Close(); err != nil {
				log.Errorf("closing staged file: %+v", err)
			}
		}
	}()

	var stagedPath stores.SectorPaths
	if len(existingPieceSizes) == 0 {
		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, true)
		if err != nil {
			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
		}

		stagedFile, err = os.Create(stagedPath.Unsealed)
		if err != nil {
			return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err)
		}
	} else {
		stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, true)
		if err != nil {
			return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
		}

		stagedFile, err = os.OpenFile(stagedPath.Unsealed, os.O_RDWR, 0644)
		if err != nil {
			return abi.PieceInfo{}, xerrors.Errorf("opening sector file: %w", err)
		}

		if _, err := stagedFile.Seek(0, io.SeekEnd); err != nil {
			return abi.PieceInfo{}, xerrors.Errorf("seek end: %w", err)
		}
	}

	_, _, pieceCID, err := ffi.WriteWithAlignment(sb.sealProofType, f, pieceSize, stagedFile, existingPieceSizes)
	if err != nil {
		return abi.PieceInfo{}, err
	}

	if err := f.Close(); err != nil {
		return abi.PieceInfo{}, err
	}

	return abi.PieceInfo{
		Size:     pieceSize.Padded(),
		PieceCID: pieceCID,
	}, werr()
}

func (sb *Sealer) ReadPieceFromSealedSector(ctx context.Context, sector abi.SectorID, offset UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealedCID cid.Cid) (io.ReadCloser, error) {
	path, doneUnsealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTUnsealed, false)
	if err != nil {
		return nil, xerrors.Errorf("acquire unsealed sector path: %w", err)
	}
	defer doneUnsealed()
	f, err := os.OpenFile(path.Unsealed, os.O_RDONLY, 0644)
	if err == nil {
		if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
			return nil, xerrors.Errorf("seek: %w", err)
		}

		lr := io.LimitReader(f, int64(size))

		return &struct {
			io.Reader
			io.Closer
		}{
			Reader: lr,
			Closer: f,
		}, nil
	}
	if !os.IsNotExist(err) {
		return nil, err
	}

	sealed, doneSealed, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, false)
	if err != nil {
		return nil, xerrors.Errorf("acquire sealed/cache sector path: %w", err)
	}
	defer doneSealed()

	// TODO: GC for those
	//  (Probably configurable count of sectors to be kept unsealed, and just
	//   remove last used one (or use whatever other cache policy makes sense))
	err = ffi.Unseal(
		sb.sealProofType,
		sealed.Cache,
		sealed.Sealed,
		path.Unsealed,
		sector.Number,
		sector.Miner,
		ticket,
		unsealedCID,
	)
	if err != nil {
		return nil, xerrors.Errorf("unseal failed: %w", err)
	}

	f, err = os.OpenFile(string(path.Unsealed), os.O_RDONLY, 0644)
	if err != nil {
		return nil, err
	}

	if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
		return nil, xerrors.Errorf("seek: %w", err)
	}

	lr := io.LimitReader(f, int64(size))

	return &struct {
		io.Reader
		io.Closer
	}{
		Reader: lr,
		Closer: f,
	}, nil
}

func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, true)
	if err != nil {
		return nil, xerrors.Errorf("acquiring sector paths: %w", err)
	}
	defer done()

	e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, xerrors.Errorf("ensuring sealed file exists: %w", err)
	}
	if err := e.Close(); err != nil {
		return nil, err
	}

	if err := os.Mkdir(paths.Cache, 0755); err != nil {
		if os.IsExist(err) {
			log.Warnf("existing cache in %s; removing", paths.Cache)

			if err := os.RemoveAll(paths.Cache); err != nil {
				return nil, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err)
			}

			if err := os.Mkdir(paths.Cache, 0755); err != nil {
				return nil, xerrors.Errorf("mkdir cache path after cleanup: %w", err)
			}
		} else {
			return nil, err
		}
	}

	var sum abi.UnpaddedPieceSize
	for _, piece := range pieces {
		sum += piece.Size.Unpadded()
	}
	ussize := abi.PaddedPieceSize(sb.ssize).Unpadded()
	if sum != ussize {
		return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
	}

	// TODO: context cancellation respect
	p1o, err := ffi.SealPreCommitPhase1(
		sb.sealProofType,
		paths.Cache,
		paths.Unsealed,
		paths.Sealed,
		sector.Number,
		sector.Miner,
		ticket,
		pieces,
	)
	if err != nil {
		return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
	}
	return p1o, nil
}

func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true)
	if err != nil {
		return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
	}
	defer done()

	sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed)
	if err != nil {
		return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
	}

	return storage.SectorCids{
		Unsealed: unsealedCID,
		Sealed:   sealedCID,
	}, nil
}

func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, true)
	if err != nil {
		return nil, xerrors.Errorf("acquire sector paths: %w", err)
	}
	defer done()
	output, err := ffi.SealCommitPhase1(
		sb.sealProofType,
		cids.Sealed,
		cids.Unsealed,
		paths.Cache,
		paths.Sealed,
		sector.Number,
		sector.Miner,
		ticket,
		seed,
		pieces,
	)
	if err != nil {
		log.Warn("StandaloneSealCommit error: ", err)
		log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)

		return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
	}
	return output, nil
}

func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) {
	return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner)
}

func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
	paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, false)
	if err != nil {
		return xerrors.Errorf("acquiring sector cache path: %w", err)
	}
	defer done()

	return ffi.ClearCache(paths.Cache)
}

func GeneratePieceCIDFromFile(proofType abi.RegisteredProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
	f, werr, err := toReadableFile(piece, int64(pieceSize))
	if err != nil {
		return cid.Undef, err
	}

	pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize)
	if err != nil {
		return cid.Undef, err
	}

	return pieceCID, werr()
}

func GenerateUnsealedCID(proofType abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) {
	var sum abi.PaddedPieceSize
	for _, p := range pieces {
		sum += p.Size
	}

	ssize, err := SectorSizeForRegisteredProof(proofType)
	if err != nil {
		return cid.Undef, err
	}

	{
		// pad remaining space with 0 CommPs
		toFill := uint64(abi.PaddedPieceSize(ssize) - sum)
		n := bits.OnesCount64(toFill)
		for i := 0; i < n; i++ {
			next := bits.TrailingZeros64(toFill)
			psize := uint64(1) << uint(next)
			toFill ^= psize

			unpadded := abi.PaddedPieceSize(psize).Unpadded()
			pieces = append(pieces, abi.PieceInfo{
				Size:     unpadded.Padded(),
				PieceCID: zerocomm.ZeroPieceCommitment(unpadded),
			})
		}
	}

	return ffi.GenerateUnsealedCID(proofType, pieces)
}
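GenerateUnsealedCID fills the unused padded space with zero-commitment pieces by decomposing the gap into its set bits, smallest power of two first, via bits.TrailingZeros64. A standalone sketch of just that decomposition (not from the diff; zeroPieceSizes is an illustrative name):

package main

import (
	"fmt"
	"math/bits"
)

// Decompose a byte gap into power-of-two piece sizes, smallest first, with
// the same OnesCount64/TrailingZeros64 loop GenerateUnsealedCID uses to pick
// its zero-CommP padding pieces.
func zeroPieceSizes(toFill uint64) []uint64 {
	var sizes []uint64
	for i, n := 0, bits.OnesCount64(toFill); i < n; i++ {
		next := bits.TrailingZeros64(toFill)
		psize := uint64(1) << uint(next)
		toFill ^= psize
		sizes = append(sizes, psize)
	}
	return sizes
}

func main() {
	// A 2048-byte (padded) sector holding a single 512-byte padded piece
	// leaves a 1536-byte gap, which decomposes as 512 + 1024.
	fmt.Println(zeroPieceSizes(2048 - 512)) // [512 1024]
}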
@ -1,354 +0,0 @@
package ffiwrapper

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	logging "github.com/ipfs/go-log"
	"golang.org/x/xerrors"

	paramfetch "github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs"
)

func init() {
	logging.SetLogLevel("*", "INFO") //nolint: errcheck
}

var sectorSize = abi.SectorSize(2048)
var sealProofType = abi.RegisteredProof_StackedDRG2KiBSeal
var postProofType = abi.RegisteredProof_StackedDRG2KiBPoSt

type seal struct {
	id     abi.SectorID
	cids   storage.SectorCids
	pi     abi.PieceInfo
	ticket abi.SealRandomness
}

func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) {
	defer done()
	dlen := abi.PaddedPieceSize(sectorSize).Unpadded()

	var err error
	r := io.LimitReader(rand.New(rand.NewSource(42+int64(id.Number))), int64(dlen))
	s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
	if err != nil {
		t.Fatalf("%+v", err)
	}

	s.ticket = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}

	p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	cids, err := sb.SealPreCommit2(context.TODO(), id, p1)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	s.cids = cids
}

func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
	defer done()
	seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}

	pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	proof, err := sb.SealCommit2(context.TODO(), s.id, pc1)
	if err != nil {
		t.Fatalf("%+v", err)
	}

	ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{
		SectorID: s.id,
		OnChain: abi.OnChainSealVerifyInfo{
			SealedCID:       s.cids.Sealed,
			RegisteredProof: sealProofType,
			Proof:           proof,
			SectorNumber:    s.id.Number,
		},
		Randomness:            s.ticket,
		InteractiveRandomness: seed,
		UnsealedCID:           s.cids.Unsealed,
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}

	if !ok {
		t.Fatal("proof failed to validate")
	}
}

func post(t *testing.T, sb *Sealer, seals ...seal) time.Time {
	randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}

	sis := make([]abi.SectorInfo, len(seals))
	for i, s := range seals {
		sis[i] = abi.SectorInfo{
			RegisteredProof: sealProofType,
			SectorNumber:    s.id.Number,
			SealedCID:       s.cids.Sealed,
		}
	}

	candidates, err := sb.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{})
	if err != nil {
		t.Fatalf("%+v", err)
	}

	genCandidates := time.Now()

	if len(candidates) != 1 {
		t.Fatal("expected 1 candidate")
	}

	candidatesPrime := make([]abi.PoStCandidate, len(candidates))
	for idx := range candidatesPrime {
		candidatesPrime[idx] = candidates[idx].Candidate
	}

	proofs, err := sb.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime)
	if err != nil {
		t.Fatalf("%+v", err)
	}

	ePoStChallengeCount := ElectionPostChallengeCount(uint64(len(sis)), 0)

	ok, err := ProofVerifier.VerifyElectionPost(context.TODO(), abi.PoStVerifyInfo{
		Randomness:      randomness,
		Candidates:      candidatesPrime,
		Proofs:          proofs,
		EligibleSectors: sis,
		Prover:          seals[0].id.Miner,
		ChallengeCount:  ePoStChallengeCount,
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	if !ok {
		t.Fatal("bad post")
	}

	return genCandidates
}

func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
	dat := build.ParametersJson()

	err := paramfetch.GetParams(dat, uint64(s))
	if err != nil {
		panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
	}
}

// TestDownloadParams exists only so that developers and CI can pre-download
// Groth parameters and verifying keys before running the tests which rely on
// those parameters and keys. To do this, run the following command:
//
// go test -run=^TestDownloadParams
//
func TestDownloadParams(t *testing.T) {
	getGrothParamFileAndVerifyingKeys(sectorSize)
}

func TestSealAndVerify(t *testing.T) {
	if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
		t.Skip("this is slow")
	}
	_ = os.Setenv("RUST_LOG", "info")

	getGrothParamFileAndVerifyingKeys(sectorSize)

	cdir, err := ioutil.TempDir("", "sbtest-c-")
	if err != nil {
		t.Fatal(err)
	}
	miner := abi.ActorID(123)

	cfg := &Config{
		SealProofType: sealProofType,
		PoStProofType: postProofType,
	}

	sp := &basicfs.Provider{
		Root: cdir,
	}
	sb, err := New(sp, cfg)
	if err != nil {
		t.Fatalf("%+v", err)
	}
	cleanup := func() {
		if t.Failed() {
			fmt.Printf("not removing %s\n", cdir)
			return
		}
		if err := os.RemoveAll(cdir); err != nil {
			t.Error(err)
		}
	}
	defer cleanup()

	si := abi.SectorID{Miner: miner, Number: 1}

	s := seal{id: si}

	start := time.Now()

	s.precommit(t, sb, si, func() {})

	precommit := time.Now()

	s.commit(t, sb, func() {})

	commit := time.Now()

	genCandidates := post(t, sb, s)

	epost := time.Now()

	post(t, sb, s)

	if err := sb.FinalizeSector(context.TODO(), si); err != nil {
		t.Fatalf("%+v", err)
	}

	fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
	fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
	fmt.Printf("GenCandidates: %s\n", genCandidates.Sub(commit).String())
	fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidates).String())
}

func TestSealPoStNoCommit(t *testing.T) {
	if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
		t.Skip("this is slow")
	}
	_ = os.Setenv("RUST_LOG", "info")

	getGrothParamFileAndVerifyingKeys(sectorSize)

	dir, err := ioutil.TempDir("", "sbtest")
	if err != nil {
		t.Fatal(err)
	}

	miner := abi.ActorID(123)

	cfg := &Config{
		SealProofType: sealProofType,
		PoStProofType: postProofType,
	}
	sp := &basicfs.Provider{
		Root: dir,
	}
	sb, err := New(sp, cfg)
	if err != nil {
		t.Fatalf("%+v", err)
	}

	cleanup := func() {
		if t.Failed() {
			fmt.Printf("not removing %s\n", dir)
			return
		}
		if err := os.RemoveAll(dir); err != nil {
			t.Error(err)
		}
	}
	defer cleanup()

	si := abi.SectorID{Miner: miner, Number: 1}

	s := seal{id: si}

	start := time.Now()

	s.precommit(t, sb, si, func() {})

	precommit := time.Now()

	if err := sb.FinalizeSector(context.TODO(), si); err != nil {
		t.Fatal(err)
	}

	genCandidates := post(t, sb, s)

	epost := time.Now()

	fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
	fmt.Printf("GenCandidates: %s\n", genCandidates.Sub(precommit).String())
	fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidates).String())
}

func TestSealAndVerify2(t *testing.T) {
	if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
		t.Skip("this is slow")
	}
	_ = os.Setenv("RUST_LOG", "trace")

	getGrothParamFileAndVerifyingKeys(sectorSize)

	dir, err := ioutil.TempDir("", "sbtest")
	if err != nil {
		t.Fatal(err)
	}

	miner := abi.ActorID(123)

	cfg := &Config{
		SealProofType: sealProofType,
		PoStProofType: postProofType,
	}
	sp := &basicfs.Provider{
		Root: dir,
	}
	sb, err := New(sp, cfg)
	if err != nil {
		t.Fatalf("%+v", err)
	}

	cleanup := func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Error(err)
		}
	}

	defer cleanup()

	var wg sync.WaitGroup

	si1 := abi.SectorID{Miner: miner, Number: 1}
	si2 := abi.SectorID{Miner: miner, Number: 2}

	s1 := seal{id: si1}
	s2 := seal{id: si2}

	wg.Add(2)
	go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
	time.Sleep(100 * time.Millisecond)
	go s2.precommit(t, sb, si2, wg.Done) //nolint: staticcheck
	wg.Wait()

	wg.Add(2)
	go s1.commit(t, sb, wg.Done) //nolint: staticcheck
	go s2.commit(t, sb, wg.Done) //nolint: staticcheck
	wg.Wait()

	post(t, sb, s1, s2)
}
@ -1,50 +0,0 @@
package ffiwrapper

import (
	"context"
	"errors"
	"io"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper/basicfs"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type UnpaddedByteIndex uint64

type Validator interface {
	CanCommit(sector stores.SectorPaths) (bool, error)
	CanProve(sector stores.SectorPaths) (bool, error)
}

type StorageSealer interface {
	storage.Sealer
	storage.Storage
}

type Storage interface {
	storage.Prover
	StorageSealer

	ReadPieceFromSealedSector(context.Context, abi.SectorID, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error)
}

type Verifier interface {
	VerifySeal(abi.SealVerifyInfo) (bool, error)
	VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error)
	VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error)
}

var ErrSectorNotFound = errors.New("sector not found")

type SectorProvider interface {
	// * returns ErrSectorNotFound if a requested existing sector doesn't exist
	// * returns an error when allocate is set, and existing isn't, and the sector exists
	AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error)
}

var _ SectorProvider = &basicfs.Provider{}
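To make the SectorProvider contract concrete, here is a hypothetical, deliberately minimal implementation (not part of this change): it places every file type under one root and skips the existence/allocation checks the interface documents; basicfs.Provider is the real reference implementation, and flatProvider is an invented name. It assumes the extracted module's import path from this PR.

package main

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/sector-storage/stores"
)

type flatProvider struct{ root string }

func (p *flatProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) {
	// Hand back fixed paths for all three file types; a real provider honors
	// `existing`/`allocate`, returns ErrSectorNotFound, and holds locks.
	var out stores.SectorPaths
	for _, ft := range []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache} {
		stores.SetPathByType(&out, ft, filepath.Join(p.root, ft.String(), stores.SectorName(id)))
	}
	return out, func() {}, nil // no-op release
}

func main() {
	p := &flatProvider{root: "/tmp/sectors"}
	paths, release, _ := p.AcquireSector(context.Background(), abi.SectorID{Miner: 1000, Number: 1}, 0, stores.FTUnsealed, true)
	defer release()
	fmt.Println(paths.Unsealed)
}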
@ -1,143 +0,0 @@
//+build cgo

package ffiwrapper

import (
	"context"
	"golang.org/x/xerrors"

	"go.opencensus.io/trace"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

func (sb *Sealer) ComputeElectionPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) {
	challengeSeed[31] = 0

	privsects, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, nil) // TODO: faults
	if err != nil {
		return nil, err
	}

	return ffi.GeneratePoSt(miner, privsects, challengeSeed, winners)
}

func (sb *Sealer) GenerateFallbackPoSt(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) (storage.FallbackPostOut, error) {
	privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults)
	if err != nil {
		return storage.FallbackPostOut{}, err
	}

	challengeCount := fallbackPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults)))
	challengeSeed[31] = 0

	candidates, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors)
	if err != nil {
		return storage.FallbackPostOut{}, err
	}

	winners := make([]abi.PoStCandidate, len(candidates))
	for idx := range winners {
		winners[idx] = candidates[idx].Candidate
	}

	proof, err := ffi.GeneratePoSt(miner, privsectors, challengeSeed, winners)
	return storage.FallbackPostOut{
		PoStInputs: ffiToStorageCandidates(candidates),
		Proof:      proof,
	}, err
}

func (sb *Sealer) GenerateEPostCandidates(ctx context.Context, miner abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) {
	privsectors, err := sb.pubSectorToPriv(ctx, miner, sectorInfo, faults)
	if err != nil {
		return nil, err
	}

	challengeSeed[31] = 0

	challengeCount := ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults)))
	pc, err := ffi.GenerateCandidates(miner, challengeSeed, challengeCount, privsectors)
	if err != nil {
		return nil, err
	}

	return ffiToStorageCandidates(pc), nil
}

func ffiToStorageCandidates(pc []ffi.PoStCandidateWithTicket) []storage.PoStCandidateWithTicket {
	out := make([]storage.PoStCandidateWithTicket, len(pc))
	for i := range out {
		out[i] = storage.PoStCandidateWithTicket{
			Candidate: pc[i].Candidate,
			Ticket:    pc[i].Ticket,
		}
	}

	return out
}

func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber) (ffi.SortedPrivateSectorInfo, error) {
	fmap := map[abi.SectorNumber]struct{}{}
	for _, fault := range faults {
		fmap[fault] = struct{}{}
	}

	var out []ffi.PrivateSectorInfo
	for _, s := range sectorInfo {
		if _, faulty := fmap[s.SectorNumber]; faulty {
			continue
		}

		paths, done, err := sb.sectors.AcquireSector(ctx, abi.SectorID{Miner: mid, Number: s.SectorNumber}, stores.FTCache|stores.FTSealed, 0, false)
		if err != nil {
			return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquire sector paths: %w", err)
		}
		done() // TODO: This is a tiny bit suboptimal

		postProofType, err := s.RegisteredProof.RegisteredPoStProof()
		if err != nil {
			return ffi.SortedPrivateSectorInfo{}, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err)
		}

		out = append(out, ffi.PrivateSectorInfo{
			CacheDirPath:     paths.Cache,
			PoStProofType:    postProofType,
			SealedSectorPath: paths.Sealed,
			SectorInfo:       s,
		})
	}

	return ffi.NewSortedPrivateSectorInfo(out...), nil
}

var _ Verifier = ProofVerifier

type proofVerifier struct{}

var ProofVerifier = proofVerifier{}

func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) {
	return ffi.VerifySeal(info)
}

func (proofVerifier) VerifyElectionPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) {
	return verifyPost(ctx, info)
}

func (proofVerifier) VerifyFallbackPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) {
	return verifyPost(ctx, info)
}

func verifyPost(ctx context.Context, info abi.PoStVerifyInfo) (bool, error) {
	_, span := trace.StartSpan(ctx, "VerifyPoSt")
	defer span.End()

	info.Randomness[31] = 0

	return ffi.VerifyPoSt(info)
}
@ -1,212 +0,0 @@
package sectorstorage

import (
	"context"
	"io"
	"os"

	"github.com/elastic/go-sysinfo"
	"golang.org/x/xerrors"

	ffi "github.com/filecoin-project/filecoin-ffi"
	"github.com/filecoin-project/specs-actors/actors/abi"
	storage2 "github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache}

type WorkerConfig struct {
	SealProof abi.RegisteredProof
	TaskTypes []sealtasks.TaskType
}

type LocalWorker struct {
	scfg       *ffiwrapper.Config
	storage    stores.Store
	localStore *stores.Local
	sindex     stores.SectorIndex

	acceptTasks map[sealtasks.TaskType]struct{}
}

func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker {
	ppt, err := wcfg.SealProof.RegisteredPoStProof()
	if err != nil {
		panic(err)
	}

	acceptTasks := map[sealtasks.TaskType]struct{}{}
	for _, taskType := range wcfg.TaskTypes {
		acceptTasks[taskType] = struct{}{}
	}

	return &LocalWorker{
		scfg: &ffiwrapper.Config{
			SealProofType: wcfg.SealProof,
			PoStProofType: ppt,
		},
		storage:    store,
		localStore: local,
		sindex:     sindex,

		acceptTasks: acceptTasks,
	}
}

type localWorkerPathProvider struct {
	w *LocalWorker
}

func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) {
	paths, storageIDs, done, err := l.w.storage.AcquireSector(ctx, sector, existing, allocate, sealing)
	if err != nil {
		return stores.SectorPaths{}, nil, err
	}

	log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)

	return paths, func() {
		done()

		for _, fileType := range pathTypes {
			if fileType&allocate == 0 {
				continue
			}

			sid := stores.PathByType(storageIDs, fileType)

			if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType); err != nil {
				log.Errorf("declare sector error: %+v", err)
			}
		}
	}, nil
}

func (l *LocalWorker) sb() (ffiwrapper.Storage, error) {
	return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
}

func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
	sb, err := l.sb()
	if err != nil {
		return err
	}

	return sb.NewSector(ctx, sector)
}

func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	sb, err := l.sb()
	if err != nil {
		return abi.PieceInfo{}, err
	}

	return sb.AddPiece(ctx, sector, epcs, sz, r)
}

func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealPreCommit1(ctx, sector, ticket, pieces)
}

func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) {
	sb, err := l.sb()
	if err != nil {
		return storage2.SectorCids{}, err
	}

	return sb.SealPreCommit2(ctx, sector, phase1Out)
}

func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}

func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
	sb, err := l.sb()
	if err != nil {
		return nil, err
	}

	return sb.SealCommit2(ctx, sector, phase1Out)
}

func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
	sb, err := l.sb()
	if err != nil {
		return err
	}

	if err := sb.FinalizeSector(ctx, sector); err != nil {
		return xerrors.Errorf("finalizing sector: %w", err)
	}

	if err := l.storage.Remove(ctx, sector, stores.FTUnsealed); err != nil {
		return xerrors.Errorf("removing unsealed data: %w", err)
	}

	if err := l.storage.MoveStorage(ctx, sector, stores.FTSealed|stores.FTCache); err != nil {
		return xerrors.Errorf("moving sealed data to storage: %w", err)
	}

	return nil
}

func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
	return l.acceptTasks, nil
}

func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
	return l.localStore.Local(ctx)
}

func (l *LocalWorker) Info(context.Context) (WorkerInfo, error) {
	hostname, err := os.Hostname() // TODO: allow overriding from config
	if err != nil {
		panic(err)
	}

	gpus, err := ffi.GetGPUDevices()
	if err != nil {
		log.Errorf("getting gpu devices failed: %+v", err)
	}

	h, err := sysinfo.Host()
	if err != nil {
		return WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
	}

	mem, err := h.Memory()
	if err != nil {
		return WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
	}

	return WorkerInfo{
		Hostname: hostname,
		Resources: WorkerResources{
			MemPhysical: mem.Total,
			MemSwap:     mem.VirtualTotal,
			MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
			GPUs:        gpus,
		},
	}, nil
}

func (l *LocalWorker) Close() error {
	return nil
}

var _ Worker = &LocalWorker{}
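A worker's capabilities are declared entirely through WorkerConfig: the seal proof type fixes the sector size, and TaskTypes whitelists which scheduler tasks it will accept. A brief sketch (not from the diff), assuming the extracted module's import path from this PR; the specific proof and task values are illustrative:

package main

import (
	"fmt"

	"github.com/filecoin-project/specs-actors/actors/abi"

	sectorstorage "github.com/filecoin-project/sector-storage"
	"github.com/filecoin-project/sector-storage/sealtasks"
)

func main() {
	// A worker dedicated to the two precommit phases; AddPiece, Commit1 and
	// Finalize would stay on the miner's built-in local worker.
	wcfg := sectorstorage.WorkerConfig{
		SealProof: abi.RegisteredProof_StackedDRG32GiBSeal,
		TaskTypes: []sealtasks.TaskType{sealtasks.TTPreCommit1, sealtasks.TTPreCommit2},
	}
	fmt.Printf("%+v\n", wcfg)
	// NewLocalWorker(wcfg, store, local, sindex) would then be registered
	// with the Manager via AddWorker.
}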
@ -1,460 +0,0 @@
package sectorstorage

import (
	"container/list"
	"context"
	"errors"
	"io"
	"net/http"
	"sync"

	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/mitchellh/go-homedir"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"

	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var log = logging.Logger("advmgr")

var ErrNoWorkers = errors.New("no suitable workers found")

type URLs []string

type Worker interface {
	ffiwrapper.StorageSealer

	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)

	// Returns paths accessible to the worker
	Paths(context.Context) ([]stores.StoragePath, error)

	Info(context.Context) (WorkerInfo, error)

	Close() error
}

type WorkerInfo struct {
	Hostname string

	Resources WorkerResources
}

type WorkerResources struct {
	MemPhysical uint64
	MemSwap     uint64

	MemReserved uint64 // Used by system / other processes

	GPUs []string
}

type SectorManager interface {
	SectorSize() abi.SectorSize

	ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error)

	ffiwrapper.StorageSealer
	storage.Prover
}

type WorkerID uint64

type Manager struct {
	scfg *ffiwrapper.Config

	ls         stores.LocalStorage
	storage    *stores.Remote
	localStore *stores.Local
	remoteHnd  *stores.FetchHandler
	index      stores.SectorIndex

	storage.Prover

	workersLk  sync.Mutex
	nextWorker WorkerID
	workers    map[WorkerID]*workerHandle

	newWorkers chan *workerHandle
	schedule   chan *workerRequest
	workerFree chan WorkerID
	closing    chan struct{}

	schedQueue *list.List // List[*workerRequest]
}

type SealerConfig struct {
	// Local worker config
	AllowPreCommit1 bool
	AllowPreCommit2 bool
	AllowCommit     bool
}

type StorageAuth http.Header

func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth) (*Manager, error) {
	lstor, err := stores.NewLocal(ctx, ls, si, urls)
	if err != nil {
		return nil, err
	}

	prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg)
	if err != nil {
		return nil, xerrors.Errorf("creating prover instance: %w", err)
	}

	stor := stores.NewRemote(lstor, si, http.Header(sa))

	m := &Manager{
		scfg: cfg,

		ls:         ls,
		storage:    stor,
		localStore: lstor,
		remoteHnd:  &stores.FetchHandler{Local: lstor},
		index:      si,

		nextWorker: 0,
		workers:    map[WorkerID]*workerHandle{},

		newWorkers: make(chan *workerHandle),
		schedule:   make(chan *workerRequest),
		workerFree: make(chan WorkerID),
		closing:    make(chan struct{}),

		schedQueue: list.New(),

		Prover: prover,
	}

	go m.runSched()

	localTasks := []sealtasks.TaskType{
		sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize,
	}
	if sc.AllowPreCommit1 {
		localTasks = append(localTasks, sealtasks.TTPreCommit1)
	}
	if sc.AllowPreCommit2 {
		localTasks = append(localTasks, sealtasks.TTPreCommit2)
	}
	if sc.AllowCommit {
		localTasks = append(localTasks, sealtasks.TTCommit2)
	}

	err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
		SealProof: cfg.SealProofType,
		TaskTypes: localTasks,
	}, stor, lstor, si))
	if err != nil {
		return nil, xerrors.Errorf("adding local worker: %w", err)
	}

	return m, nil
}

func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
	path, err := homedir.Expand(path)
	if err != nil {
		return xerrors.Errorf("expanding local path: %w", err)
	}

	if err := m.localStore.OpenPath(ctx, path); err != nil {
		return xerrors.Errorf("opening local path: %w", err)
	}

	if err := m.ls.SetStorage(func(sc *stores.StorageConfig) {
		sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path})
	}); err != nil {
		return xerrors.Errorf("get storage config: %w", err)
	}
	return nil
}

func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
	info, err := w.Info(ctx)
	if err != nil {
		return xerrors.Errorf("getting worker info: %w", err)
	}

	m.newWorkers <- &workerHandle{
		w:    w,
		info: info,
	}
	return nil
}

func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	m.remoteHnd.ServeHTTP(w, r)
}

func (m *Manager) SectorSize() abi.SectorSize {
	sz, _ := m.scfg.SealProofType.SectorSize()
	return sz
}

func (m *Manager) ReadPieceFromSealedSector(context.Context, abi.SectorID, ffiwrapper.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (io.ReadCloser, error) {
	panic("implement me")
}

func (m *Manager) getWorkersByPaths(task sealtasks.TaskType, inPaths []stores.StorageInfo) ([]WorkerID, map[WorkerID]stores.StorageInfo) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	var workers []WorkerID
	paths := map[WorkerID]stores.StorageInfo{}

	for i, worker := range m.workers {
		tt, err := worker.w.TaskTypes(context.TODO())
		if err != nil {
			log.Errorf("error getting supported worker task types: %+v", err)
			continue
		}
		if _, ok := tt[task]; !ok {
			log.Debugf("dropping worker %d; task %s not supported (supports %v)", i, task, tt)
			continue
		}

		phs, err := worker.w.Paths(context.TODO())
		if err != nil {
			log.Errorf("error getting worker paths: %+v", err)
			continue
		}

		// check if the worker has access to the path we selected
		var st *stores.StorageInfo
		for _, p := range phs {
			for _, meta := range inPaths {
				if p.ID == meta.ID {
					if st != nil && st.Weight > p.Weight {
						continue
					}

					p := meta // copy
					st = &p
				}
			}
		}
		if st == nil {
			log.Debugf("skipping worker %d; doesn't have any of %v", i, inPaths)
			log.Debugf("skipping worker %d; only has %v", i, phs)
			continue
		}

		paths[i] = *st
		workers = append(workers, i)
	}

	return workers, paths
}

func (m *Manager) getWorker(ctx context.Context, taskType sealtasks.TaskType, accept []WorkerID) (Worker, func(), error) {
	ret := make(chan workerResponse)

	select {
	case m.schedule <- &workerRequest{
		taskType: taskType,
		accept:   accept,

		cancel: ctx.Done(),
		ret:    ret,
	}:
	case <-m.closing:
		return nil, nil, xerrors.New("closing")
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}

	select {
	case resp := <-ret:
		return resp.worker, resp.done, resp.err
	case <-m.closing:
		return nil, nil, xerrors.New("closing")
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}
}

func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
	log.Warnf("stub NewSector")
	return nil
}

func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	// TODO: consider multiple paths vs workers when initially allocating

	var best []stores.StorageInfo
	var err error
	if len(existingPieces) == 0 { // new
		best, err = m.index.StorageBestAlloc(ctx, stores.FTUnsealed, true)
	} else { // append to existing
		best, err = m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, false)
	}
	if err != nil {
		return abi.PieceInfo{}, xerrors.Errorf("finding sector path: %w", err)
	}

	log.Debugf("find workers for %v", best)
	candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTAddPiece, best)

	if len(candidateWorkers) == 0 {
		return abi.PieceInfo{}, ErrNoWorkers
	}

	worker, done, err := m.getWorker(ctx, sealtasks.TTAddPiece, candidateWorkers)
	if err != nil {
		return abi.PieceInfo{}, xerrors.Errorf("scheduling worker: %w", err)
	}
	defer done()

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return worker.AddPiece(ctx, sector, existingPieces, sz, r)
}

func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	// TODO: also consider where the unsealed data sits

	best, err := m.index.StorageBestAlloc(ctx, stores.FTCache|stores.FTSealed, true)
	if err != nil {
		return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit1, best)
	if len(candidateWorkers) == 0 {
		return nil, ErrNoWorkers
	}

	worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit1, candidateWorkers)
	if err != nil {
		return nil, xerrors.Errorf("scheduling worker: %w", err)
	}
	defer done()

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return worker.SealPreCommit1(ctx, sector, ticket, pieces)
}

func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
	// TODO: allow workers to fetch the sectors

	best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true)
	if err != nil {
		return storage.SectorCids{}, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTPreCommit2, best)
	if len(candidateWorkers) == 0 {
		return storage.SectorCids{}, ErrNoWorkers
	}

	worker, done, err := m.getWorker(ctx, sealtasks.TTPreCommit2, candidateWorkers)
	if err != nil {
		return storage.SectorCids{}, xerrors.Errorf("scheduling worker: %w", err)
	}
	defer done()

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return worker.SealPreCommit2(ctx, sector, phase1Out)
}

func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
	best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed, true)
	if err != nil {
		return nil, xerrors.Errorf("finding path for sector sealing: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTCommit1, best)
	if len(candidateWorkers) == 0 {
		return nil, ErrNoWorkers
	}

	// TODO: Try very hard to execute on worker with access to the sectors
	worker, done, err := m.getWorker(ctx, sealtasks.TTCommit1, candidateWorkers)
	if err != nil {
		return nil, xerrors.Errorf("scheduling worker: %w", err)
	}
	defer done()

	// TODO: select(candidateWorkers, ...)
	// TODO: remove the sectorbuilder abstraction, pass path directly
	return worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}

func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
	var candidateWorkers []WorkerID

	m.workersLk.Lock()
	for id, worker := range m.workers {
		tt, err := worker.w.TaskTypes(ctx)
		if err != nil {
			log.Errorf("error getting supported worker task types: %+v", err)
			continue
		}
		if _, ok := tt[sealtasks.TTCommit2]; !ok {
			continue
		}
		candidateWorkers = append(candidateWorkers, id)
	}
	m.workersLk.Unlock()
	if len(candidateWorkers) == 0 {
		return nil, ErrNoWorkers
	}

	worker, done, err := m.getWorker(ctx, sealtasks.TTCommit2, candidateWorkers)
	if err != nil {
		return nil, xerrors.Errorf("scheduling worker: %w", err)
	}
	defer done()

	return worker.SealCommit2(ctx, sector, phase1Out)
}

func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
	best, err := m.index.StorageFindSector(ctx, sector, stores.FTCache|stores.FTSealed|stores.FTUnsealed, true)
	if err != nil {
		return xerrors.Errorf("finding sealed sector: %w", err)
	}

	candidateWorkers, _ := m.getWorkersByPaths(sealtasks.TTFinalize, best)
	if len(candidateWorkers) == 0 {
		return ErrNoWorkers
	}

	// TODO: Remove sector from sealing stores
	// TODO: Move the sector to long-term storage
	return m.workers[candidateWorkers[0]].w.FinalizeSector(ctx, sector)
}

func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
	l, err := m.localStore.Local(ctx)
	if err != nil {
		return nil, err
	}

	out := map[stores.ID]string{}
	for _, st := range l {
		out[st.ID] = st.LocalPath
	}

	return out, nil
}

func (m *Manager) FsStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
	return m.storage.FsStat(ctx, id)
}

func (m *Manager) Close() error {
	close(m.closing)
	return nil
}

var _ SectorManager = &Manager{}
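SealerConfig only gates the miner's built-in local worker: AddPiece, Commit1 and Finalize always run locally, while the heavy phases are opt-in. A short sketch mirroring the gating logic in New() (not from the diff; assumes the extracted module's import path, and the flag values are illustrative):

package main

import (
	"fmt"

	sectorstorage "github.com/filecoin-project/sector-storage"
	"github.com/filecoin-project/sector-storage/sealtasks"
)

func main() {
	sc := sectorstorage.SealerConfig{AllowPreCommit1: true, AllowPreCommit2: true, AllowCommit: false}

	// Same task-gating as New(): the cheap tasks are unconditional, the
	// resource-heavy phases are added only when enabled.
	localTasks := []sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize}
	if sc.AllowPreCommit1 {
		localTasks = append(localTasks, sealtasks.TTPreCommit1)
	}
	if sc.AllowPreCommit2 {
		localTasks = append(localTasks, sealtasks.TTPreCommit2)
	}
	if sc.AllowCommit {
		localTasks = append(localTasks, sealtasks.TTCommit2)
	}
	fmt.Println(localTasks)
}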
@ -1,363 +0,0 @@
package mock

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/big"
	"math/rand"
	"sync"

	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-storage/storage"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/storage/sectorstorage"
	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
)

var log = logging.Logger("sbmock")

type SectorMgr struct {
	sectors      map[abi.SectorID]*sectorState
	sectorSize   abi.SectorSize
	nextSectorID abi.SectorNumber
	rateLimit    chan struct{}
	proofType    abi.RegisteredProof

	lk sync.Mutex
}

type mockVerif struct{}

func NewMockSectorMgr(threads int, ssize abi.SectorSize) *SectorMgr {
	rt, _, err := ffiwrapper.ProofTypeFromSectorSize(ssize)
	if err != nil {
		panic(err)
	}

	return &SectorMgr{
		sectors:      make(map[abi.SectorID]*sectorState),
		sectorSize:   ssize,
		nextSectorID: 5,
		rateLimit:    make(chan struct{}, threads),
		proofType:    rt,
	}
}

const (
	statePacking = iota
	statePreCommit
	stateCommit
)

type sectorState struct {
	pieces []cid.Cid
	failed bool

	state int

	lk sync.Mutex
}

func (sb *SectorMgr) RateLimit() func() {
	sb.rateLimit <- struct{}{}

	// TODO: probably want to copy over rate limit code
	return func() {
		<-sb.rateLimit
	}
}

func (sb *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error {
	return nil
}

func (sb *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
	log.Warn("Add piece: ", sectorId, size, sb.proofType)
	sb.lk.Lock()
	ss, ok := sb.sectors[sectorId]
	if !ok {
		ss = &sectorState{
			state: statePacking,
		}
		sb.sectors[sectorId] = ss
	}
	sb.lk.Unlock()
	ss.lk.Lock()
	defer ss.lk.Unlock()

	c, err := ffiwrapper.GeneratePieceCIDFromFile(sb.proofType, r, size)
	if err != nil {
		return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
	}

	log.Warn("Generated Piece CID: ", c)

	ss.pieces = append(ss.pieces, c)
	return abi.PieceInfo{
		Size:     size.Padded(),
		PieceCID: c,
	}, nil
}

func (sb *SectorMgr) SectorSize() abi.SectorSize {
	return sb.sectorSize
}

func (sb *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
	sb.lk.Lock()
	defer sb.lk.Unlock()
	id := sb.nextSectorID
	sb.nextSectorID++
	return id, nil
}

func (sb *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
	sb.lk.Lock()
	ss, ok := sb.sectors[sid]
	sb.lk.Unlock()
	if !ok {
		return nil, xerrors.Errorf("no sector with id %d in storage", sid)
	}

	ss.lk.Lock()
	defer ss.lk.Unlock()

	ussize := abi.PaddedPieceSize(sb.sectorSize).Unpadded()

	// TODO: verify pieces in sinfo.pieces match passed in pieces

	var sum abi.UnpaddedPieceSize
	for _, p := range pieces {
		sum += p.Size.Unpadded()
	}

	if sum != ussize {
		return nil, xerrors.Errorf("aggregated piece sizes don't match up: %d != %d", sum, ussize)
	}

	if ss.state != statePacking {
		return nil, xerrors.Errorf("cannot call pre-seal on sector not in 'packing' state")
	}

	opFinishWait(ctx)

	ss.state = statePreCommit

	pis := make([]abi.PieceInfo, len(ss.pieces))
	for i, piece := range ss.pieces {
		pis[i] = abi.PieceInfo{
			Size:     pieces[i].Size,
			PieceCID: piece,
		}
	}

	commd, err := MockVerifier.GenerateDataCommitment(sb.proofType, pis)
	if err != nil {
		return nil, err
	}

	cc, _, err := commcid.CIDToCommitment(commd)
	if err != nil {
		panic(err)
	}

	cc[0] ^= 'd'

	return cc, nil
}

func (sb *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
	db := []byte(string(phase1Out))
	db[0] ^= 'd'

	d := commcid.DataCommitmentV1ToCID(db)

	commr := make([]byte, 32)
	for i := range db {
		commr[32-(i+1)] = db[i]
	}

	commR := commcid.DataCommitmentV1ToCID(commr)

	return storage.SectorCids{
		Unsealed: d,
		Sealed:   commR,
	}, nil
}

func (sb *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
	sb.lk.Lock()
	ss, ok := sb.sectors[sid]
	sb.lk.Unlock()
	if !ok {
		return nil, xerrors.Errorf("no such sector %d", sid)
	}
	ss.lk.Lock()
	defer ss.lk.Unlock()

	if ss.failed {
		return nil, xerrors.Errorf("[mock] cannot commit failed sector %d", sid)
	}

	if ss.state != statePreCommit {
		return nil, xerrors.Errorf("cannot commit sector that has not been precommitted")
	}

	opFinishWait(ctx)

	var out [32]byte
	for i := range out {
		out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff)
	}

	return out[:], nil
}

func (sb *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
	var out [32]byte
	for i := range out {
		out[i] = phase1Out[i] ^ byte(sid.Number&0xff)
	}

	return out[:], nil
}

// Test Instrumentation Methods

func (sb *SectorMgr) FailSector(sid abi.SectorID) error {
	sb.lk.Lock()
	defer sb.lk.Unlock()
	ss, ok := sb.sectors[sid]
	if !ok {
		return fmt.Errorf("no such sector in storage")
	}

	ss.failed = true
	return nil
}

func opFinishWait(ctx context.Context) {
	val, ok := ctx.Value("opfinish").(chan struct{})
	if !ok {
		return
	}
	<-val
}

func AddOpFinish(ctx context.Context) (context.Context, func()) {
	done := make(chan struct{})

	return context.WithValue(ctx, "opfinish", done), func() {
		close(done)
	}
}

func (sb *SectorMgr) GenerateFallbackPoSt(context.Context, abi.ActorID, []abi.SectorInfo, abi.PoStRandomness, []abi.SectorNumber) (storage.FallbackPostOut, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (sb *SectorMgr) ComputeElectionPoSt(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, winners []abi.PoStCandidate) ([]abi.PoStProof, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (sb *SectorMgr) GenerateEPostCandidates(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, challengeSeed abi.PoStRandomness, faults []abi.SectorNumber) ([]storage.PoStCandidateWithTicket, error) {
|
||||
if len(faults) > 0 {
|
||||
panic("todo")
|
||||
}
|
||||
|
||||
n := ffiwrapper.ElectionPostChallengeCount(uint64(len(sectorInfo)), uint64(len(faults)))
|
||||
if n > uint64(len(sectorInfo)) {
|
||||
n = uint64(len(sectorInfo))
|
||||
}
|
||||
|
||||
out := make([]storage.PoStCandidateWithTicket, n)
|
||||
|
||||
seed := big.NewInt(0).SetBytes(challengeSeed[:])
|
||||
start := seed.Mod(seed, big.NewInt(int64(len(sectorInfo)))).Int64()
|
||||
|
||||
for i := range out {
|
||||
out[i] = storage.PoStCandidateWithTicket{
|
||||
Candidate: abi.PoStCandidate{
|
||||
SectorID: abi.SectorID{
|
||||
Number: abi.SectorNumber((int(start) + i) % len(sectorInfo)),
|
||||
Miner: mid,
|
||||
},
|
||||
PartialTicket: abi.PartialTicket(challengeSeed),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (sb *SectorMgr) ReadPieceFromSealedSector(ctx context.Context, sectorID abi.SectorID, offset ffiwrapper.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, commD cid.Cid) (io.ReadCloser, error) {
|
||||
if len(sb.sectors[sectorID].pieces) > 1 {
|
||||
panic("implme")
|
||||
}
|
||||
return ioutil.NopCloser(io.LimitReader(bytes.NewReader(sb.sectors[sectorID].pieces[0].Bytes()[offset:]), int64(size))), nil
|
||||
}
|
||||
|
||||
func (sb *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) {
|
||||
usize := abi.PaddedPieceSize(sb.sectorSize).Unpadded()
|
||||
sid, err := sb.AcquireSectorNumber()
|
||||
if err != nil {
|
||||
return abi.SectorID{}, nil, err
|
||||
}
|
||||
|
||||
buf := make([]byte, usize)
|
||||
rand.Read(buf)
|
||||
|
||||
id := abi.SectorID{
|
||||
Miner: mid,
|
||||
Number: sid,
|
||||
}
|
||||
|
||||
pi, err := sb.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf))
|
||||
if err != nil {
|
||||
return abi.SectorID{}, nil, err
|
||||
}
|
||||
|
||||
return id, []abi.PieceInfo{pi}, nil
|
||||
}
|
||||
|
||||
func (sb *SectorMgr) FinalizeSector(context.Context, abi.SectorID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m mockVerif) VerifyElectionPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m mockVerif) VerifyFallbackPost(ctx context.Context, pvi abi.PoStVerifyInfo) (bool, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
|
||||
if len(svi.OnChain.Proof) != 32 { // Real ones are longer, but this should be fine
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for i, b := range svi.OnChain.Proof {
|
||||
if b != svi.UnsealedCID.Bytes()[i]+svi.OnChain.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredProof, pieces []abi.PieceInfo) (cid.Cid, error) {
|
||||
return ffiwrapper.GenerateUnsealedCID(pt, pieces)
|
||||
}
|
||||
|
||||
var MockVerifier = mockVerif{}
|
||||
|
||||
var _ ffiwrapper.Verifier = MockVerifier
|
||||
var _ sectorstorage.SectorManager = &SectorMgr{}
|
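For reference, a minimal sketch (not part of this diff; the function name is hypothetical, and it assumes it runs inside this package with context and fmt imported) of how the mock's fake commitments chain together: SealPreCommit1 returns the data commitment bytes with one byte XORed, and SealPreCommit2 undoes the XOR and mirrors the bytes into a fake CommR.

func sketchMockSealFlow() {
	sb := NewMockSectorMgr(1, 2048)

	// Stage a sector full of random data for miner id 123.
	sid, pieces, err := sb.StageFakeData(123)
	if err != nil {
		panic(err)
	}

	// PC1 flips one byte of the data commitment...
	p1, err := sb.SealPreCommit1(context.TODO(), sid, abi.SealRandomness{}, pieces)
	if err != nil {
		panic(err)
	}

	// ...and PC2 flips it back, deriving a fake CommR by reversing the bytes.
	cids, err := sb.SealPreCommit2(context.TODO(), sid, p1)
	if err != nil {
		panic(err)
	}
	fmt.Println("CommD:", cids.Unsealed, "CommR:", cids.Sealed)
}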
@ -1,45 +0,0 @@
package mock

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/specs-actors/actors/abi"
)

func TestOpFinish(t *testing.T) {
	sb := NewMockSectorMgr(1, 2048)

	sid, pieces, err := sb.StageFakeData(123)
	if err != nil {
		t.Fatal(err)
	}

	ctx, done := AddOpFinish(context.TODO())

	finished := make(chan struct{})
	go func() {
		_, err := sb.SealPreCommit1(ctx, sid, abi.SealRandomness{}, pieces)
		if err != nil {
			t.Error(err)
			return
		}

		close(finished)
	}()

	select {
	case <-finished:
		t.Fatal("should not finish until we tell it to")
	case <-time.After(time.Second / 2):
	}

	done()

	select {
	case <-finished:
	case <-time.After(time.Second / 2):
		t.Fatal("should finish after we tell it to")
	}
}
@ -1,9 +0,0 @@
package mock

func CommDR(in []byte) (out [32]byte) {
	for i, b := range in {
		out[i] = ^b
	}

	return out
}
@ -1,136 +0,0 @@
package sectorstorage

import (
	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

var FSOverheadSeal = map[stores.SectorFileType]int{ // 10x overheads
	stores.FTUnsealed: 10,
	stores.FTSealed:   10,
	stores.FTCache:    70, // TODO: confirm for 32G
}

var FsOverheadFinalized = map[stores.SectorFileType]int{
	stores.FTUnsealed: 10,
	stores.FTSealed:   10,
	stores.FTCache:    2,
}

type Resources struct {
	MinMemory uint64 // What Must be in RAM for decent perf
	MaxMemory uint64 // Memory required (swap + ram)

	MultiThread bool
	CanGPU      bool

	BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads)
}

const MaxCachingOverhead = 32 << 30

var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredProof]Resources{
	sealtasks.TTAddPiece: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{ // This is probably a bit conservative
			MaxMemory: 32 << 30,
			MinMemory: 32 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTPreCommit1: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 64 << 30,
			MinMemory: 32 << 30,

			MultiThread: false,

			BaseMinMemory: 30 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTPreCommit2: {
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 96 << 30,
			MinMemory: 64 << 30,

			MultiThread: true,

			BaseMinMemory: 30 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: true,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTCommit1: { // Very short (~100ms), so params are very light
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 1 << 30,
			MinMemory: 1 << 30,

			MultiThread: false,

			BaseMinMemory: 1 << 30,
		},
	},
	sealtasks.TTCommit2: { // TODO: Measure more accurately
		abi.RegisteredProof_StackedDRG32GiBSeal: Resources{
			MaxMemory: 110 << 30,
			MinMemory: 60 << 30,

			MultiThread: true,
			CanGPU:      true,

			BaseMinMemory: 64 << 30, // params
		},
		abi.RegisteredProof_StackedDRG512MiBSeal: Resources{
			MaxMemory: 3 << 29, // 1.5G
			MinMemory: 1 << 30,

			MultiThread: false, // This is fine
			CanGPU:      true,

			BaseMinMemory: 10 << 30,
		},
	},
}

func init() {
	// for now we just reuse params for 2kib and 8mib from 512mib

	for taskType := range ResourceTable {
		ResourceTable[taskType][abi.RegisteredProof_StackedDRG8MiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal]
		ResourceTable[taskType][abi.RegisteredProof_StackedDRG2KiBSeal] = ResourceTable[taskType][abi.RegisteredProof_StackedDRG512MiBSeal]
	}
}
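A quick sketch (not part of this diff; assumes fmt is imported) of how this table is consulted. The lookup mirrors what canHandleRequest does in the scheduler further down, and the printed values follow directly from the table above.

func sketchResourceLookup() {
	res, ok := ResourceTable[sealtasks.TTPreCommit1][abi.RegisteredProof_StackedDRG32GiBSeal]
	if !ok {
		panic("no resource entry for task/proof pair")
	}
	// For PC1 on a 32GiB sector: MinMemory=32GiB, MaxMemory=64GiB, BaseMinMemory=30GiB.
	fmt.Printf("PC1/32G: min %d GiB, max %d GiB (+%d GiB base)\n",
		res.MinMemory>>30, res.MaxMemory>>30, res.BaseMinMemory>>30)
}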
@ -1,25 +0,0 @@
package sectorstorage

import (
	"context"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sectorstorage/stores"
)

type readonlyProvider struct {
	stor *stores.Local
}

func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing bool) (stores.SectorPaths, func(), error) {
	if allocate != stores.FTNone {
		return stores.SectorPaths{}, nil, xerrors.New("read-only storage")
	}

	p, _, done, err := l.stor.AcquireSector(ctx, id, existing, allocate, sealing)

	return p, done, err
}
@ -1,256 +0,0 @@
package sectorstorage

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/storage/sectorstorage/sealtasks"
)

const mib = 1 << 20

type workerRequest struct {
	taskType sealtasks.TaskType
	accept   []WorkerID // ordered by preference

	ret    chan<- workerResponse
	cancel <-chan struct{}
}

type workerResponse struct {
	err error

	worker Worker
	done   func()
}

func (r *workerRequest) respond(resp workerResponse) {
	select {
	case r.ret <- resp:
	case <-r.cancel:
		log.Warnf("request got cancelled before we could respond")
		if resp.done != nil {
			resp.done()
		}
	}
}

type workerHandle struct {
	w Worker

	info WorkerInfo

	memUsedMin uint64
	memUsedMax uint64
	gpuUsed    bool
	cpuUse     int // -1 - multicore thing; 0 - free; 1+ - singlecore things
}

func (m *Manager) runSched() {
	for {
		select {
		case w := <-m.newWorkers:
			m.schedNewWorker(w)
		case req := <-m.schedule:
			resp, err := m.maybeSchedRequest(req)
			if err != nil {
				req.respond(workerResponse{err: err})
				continue
			}

			if resp != nil {
				req.respond(*resp)
				continue
			}

			m.schedQueue.PushBack(req)
		case wid := <-m.workerFree:
			m.onWorkerFreed(wid)
		case <-m.closing:
			m.schedClose()
			return
		}
	}
}

func (m *Manager) onWorkerFreed(wid WorkerID) {
	for e := m.schedQueue.Front(); e != nil; e = e.Next() {
		req := e.Value.(*workerRequest)
		var ok bool
		for _, id := range req.accept {
			if id == wid {
				ok = true
				break
			}
		}
		if !ok {
			continue
		}

		resp, err := m.maybeSchedRequest(req)
		if err != nil {
			req.respond(workerResponse{err: err})
			continue
		}

		if resp != nil {
			req.respond(*resp)

			pe := e.Prev()
			m.schedQueue.Remove(e)
			if pe == nil {
				pe = m.schedQueue.Front()
			}
			if pe == nil {
				break
			}
			e = pe
			continue
		}
	}
}

func (m *Manager) maybeSchedRequest(req *workerRequest) (*workerResponse, error) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	tried := 0

	for _, id := range req.accept {
		w, ok := m.workers[id]
		if !ok {
			log.Warnf("requested worker %d is not in scheduler", id)
			continue
		}
		tried++

		canDo, err := m.canHandleRequest(id, w, req)
		if err != nil {
			return nil, err
		}

		if !canDo {
			continue
		}

		return m.makeResponse(id, w, req), nil
	}

	if tried == 0 {
		return nil, xerrors.New("maybeSchedRequest didn't find any good workers")
	}

	return nil, nil // put in waiting queue
}

func (m *Manager) makeResponse(wid WorkerID, w *workerHandle, req *workerRequest) *workerResponse {
	needRes := ResourceTable[req.taskType][m.scfg.SealProofType]

	w.gpuUsed = needRes.CanGPU
	if needRes.MultiThread {
		w.cpuUse = -1
	} else {
		if w.cpuUse != -1 {
			w.cpuUse++
		} else {
			log.Warnf("sched: makeResponse for worker %d: worker cpu is in multicore use, but a single core task was scheduled", wid)
		}
	}

	w.memUsedMin += needRes.MinMemory
	w.memUsedMax += needRes.MaxMemory

	return &workerResponse{
		err:    nil,
		worker: w.w,
		done: func() {
			m.workersLk.Lock()

			if needRes.CanGPU {
				w.gpuUsed = false
			}

			if needRes.MultiThread {
				w.cpuUse = 0
			} else if w.cpuUse != -1 {
				w.cpuUse--
			}

			w.memUsedMin -= needRes.MinMemory
			w.memUsedMax -= needRes.MaxMemory

			m.workersLk.Unlock()

			select {
			case m.workerFree <- wid:
			case <-m.closing:
			}
		},
	}
}

func (m *Manager) canHandleRequest(wid WorkerID, w *workerHandle, req *workerRequest) (bool, error) {
	needRes, ok := ResourceTable[req.taskType][m.scfg.SealProofType]
	if !ok {
		return false, xerrors.Errorf("canHandleRequest: missing ResourceTable entry for %s/%d", req.taskType, m.scfg.SealProofType)
	}

	res := w.info.Resources

	// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
	minNeedMem := res.MemReserved + w.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
	if minNeedMem > res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
		return false, nil
	}

	maxNeedMem := res.MemReserved + w.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
	if m.scfg.SealProofType == abi.RegisteredProof_StackedDRG32GiBSeal {
		maxNeedMem += MaxCachingOverhead
	}
	if maxNeedMem > res.MemSwap+res.MemPhysical {
		log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
		return false, nil
	}

	if needRes.MultiThread {
		if w.cpuUse != 0 {
			log.Debugf("sched: not scheduling on worker %d; multicore process needs free CPU", wid)
			return false, nil
		}
	} else {
		if w.cpuUse == -1 {
			log.Debugf("sched: not scheduling on worker %d; CPU in use by a multicore process", wid)
			return false, nil
		}
	}

	if len(res.GPUs) > 0 && needRes.CanGPU {
		if w.gpuUsed {
			log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
			return false, nil
		}
	}

	return true, nil
}

func (m *Manager) schedNewWorker(w *workerHandle) {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	id := m.nextWorker
	m.workers[id] = w
	m.nextWorker++
}

func (m *Manager) schedClose() {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	for i, w := range m.workers {
		if err := w.w.Close(); err != nil {
			log.Errorf("closing worker %d: %+v", i, err)
		}
	}
}
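To make the admission arithmetic in canHandleRequest concrete, here is a small worked sketch (not part of this diff; the worker numbers are hypothetical, and it assumes fmt is imported). It computes the same two bounds the scheduler checks for PC2 on a 32GiB sector.

func sketchAdmission() {
	// Hypothetical worker: 128 GiB RAM, 64 GiB swap, 2 GiB reserved, nothing running.
	var (
		memPhysical uint64 = 128 << 30
		memSwap     uint64 = 64 << 30
		memReserved uint64 = 2 << 30
		memUsedMin  uint64 = 0
		memUsedMax  uint64 = 0
	)

	need := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredProof_StackedDRG32GiBSeal]

	minNeed := memReserved + memUsedMin + need.MinMemory + need.BaseMinMemory // 2+0+64+30 = 96 GiB
	maxNeed := memReserved + memUsedMax + need.MaxMemory + need.BaseMinMemory // 2+0+96+30 = 128 GiB
	maxNeed += MaxCachingOverhead                                             // +32 GiB for 32G sectors

	fmt.Println(minNeed <= memPhysical)         // true: fits in RAM
	fmt.Println(maxNeed <= memSwap+memPhysical) // true: fits in RAM+swap
}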
@ -1,13 +0,0 @@
package sealtasks

type TaskType string

const (
	TTAddPiece   TaskType = "seal/v0/addpiece"
	TTPreCommit1 TaskType = "seal/v0/precommit/1"
	TTPreCommit2 TaskType = "seal/v0/precommit/2"
	TTCommit1    TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers!
	TTCommit2    TaskType = "seal/v0/commit/2"

	TTFinalize TaskType = "seal/v0/finalize"
)
@ -1,29 +0,0 @@
package sectorstorage

type WorkerStats struct {
	Info WorkerInfo

	MemUsedMin uint64
	MemUsedMax uint64
	GpuUsed    bool
	CpuUse     int
}

func (m *Manager) WorkerStats() map[uint64]WorkerStats {
	m.workersLk.Lock()
	defer m.workersLk.Unlock()

	out := map[uint64]WorkerStats{}

	for id, handle := range m.workers {
		out[uint64(id)] = WorkerStats{
			Info:       handle.info,
			MemUsedMin: handle.memUsedMin,
			MemUsedMax: handle.memUsedMax,
			GpuUsed:    handle.gpuUsed,
			CpuUse:     handle.cpuUse,
		}
	}

	return out
}
@ -1,91 +0,0 @@
package stores

import (
	"fmt"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"
)

const (
	FTUnsealed SectorFileType = 1 << iota
	FTSealed
	FTCache
)

const (
	FTNone SectorFileType = 0
)

type SectorFileType int

func (t SectorFileType) String() string {
	switch t {
	case FTUnsealed:
		return "unsealed"
	case FTSealed:
		return "sealed"
	case FTCache:
		return "cache"
	default:
		return fmt.Sprintf("<unknown %d>", t)
	}
}

func (t SectorFileType) Has(singleType SectorFileType) bool {
	return t&singleType == singleType
}

type SectorPaths struct {
	Id abi.SectorID

	Unsealed string
	Sealed   string
	Cache    string
}

func ParseSectorID(baseName string) (abi.SectorID, error) {
	var n abi.SectorNumber
	var mid abi.ActorID
	read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n)
	if err != nil {
		return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err)
	}

	if read != 2 {
		return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read)
	}

	return abi.SectorID{
		Miner:  mid,
		Number: n,
	}, nil
}

func SectorName(sid abi.SectorID) string {
	return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
}

func PathByType(sps SectorPaths, fileType SectorFileType) string {
	switch fileType {
	case FTUnsealed:
		return sps.Unsealed
	case FTSealed:
		return sps.Sealed
	case FTCache:
		return sps.Cache
	}

	panic("requested unknown path type")
}

func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
	switch fileType {
	case FTUnsealed:
		sps.Unsealed = p
	case FTSealed:
		sps.Sealed = p
	case FTCache:
		sps.Cache = p
	}
}
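A short sketch (not part of this diff; assumes fmt is imported, and the function name is hypothetical) of the bitmask and naming helpers above; the values in the comments follow directly from the definitions.

func sketchFileTypes() {
	ft := FTSealed | FTCache
	fmt.Println(ft.Has(FTSealed))   // true
	fmt.Println(ft.Has(FTUnsealed)) // false

	name := SectorName(abi.SectorID{Miner: 1000, Number: 7}) // "s-t01000-7"
	id, err := ParseSectorID(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Miner, id.Number) // 1000 7
}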
@ -1,150 +0,0 @@
package stores

import (
	"encoding/json"
	"io"
	"net/http"
	"os"

	"github.com/gorilla/mux"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/storage/sectorstorage/tarutil"
)

var log = logging.Logger("stores")

type FetchHandler struct {
	*Local
}

func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/
	mux := mux.NewRouter()

	mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
	mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
	mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")

	mux.ServeHTTP(w, r)
}

func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id := ID(vars["id"])

	st, err := handler.Local.FsStat(r.Context(), id)
	switch err {
	case errPathNotFound:
		w.WriteHeader(404)
		return
	case nil:
		break
	default:
		w.WriteHeader(500)
		log.Errorf("%+v", err)
		return
	}

	if err := json.NewEncoder(w).Encode(&st); err != nil {
		log.Warnf("error writing stat response: %+v", err)
	}
}

func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {
	log.Infof("SERVE GET %s", r.URL)
	vars := mux.Vars(r)

	id, err := ParseSectorID(vars["id"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	ft, err := ftFromString(vars["type"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
	paths, _, done, err := handler.Local.AcquireSector(r.Context(), id, ft, FTNone, false)
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
	defer done()

	path := PathByType(paths, ft)
	if path == "" {
		log.Error("acquired path was empty")
		w.WriteHeader(500)
		return
	}

	stat, err := os.Stat(path)
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	var rd io.Reader
	if stat.IsDir() {
		rd, err = tarutil.TarDirectory(path)
		w.Header().Set("Content-Type", "application/x-tar")
	} else {
		rd, err = os.OpenFile(path, os.O_RDONLY, 0644)
		w.Header().Set("Content-Type", "application/octet-stream")
	}
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	w.WriteHeader(200)
	if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small
		log.Errorf("%+v", err)
		return
	}
}

func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) {
	log.Infof("SERVE DELETE %s", r.URL)
	vars := mux.Vars(r)

	id, err := ParseSectorID(vars["id"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	ft, err := ftFromString(vars["type"])
	if err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}

	if err := handler.Remove(r.Context(), id, ft); err != nil {
		log.Errorf("%+v", err)
		w.WriteHeader(500)
		return
	}
}

func ftFromString(t string) (SectorFileType, error) {
	switch t {
	case FTUnsealed.String():
		return FTUnsealed, nil
	case FTSealed.String():
		return FTSealed, nil
	case FTCache.String():
		return FTCache, nil
	default:
		return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
	}
}
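Sketch of how these endpoints might be served (not part of this diff; the Local construction and listen address are assumptions, and NewLocal is defined in the local store file further down):

func sketchServeRemote(ctx context.Context, ls LocalStorage, idx SectorIndex) error {
	local, err := NewLocal(ctx, ls, idx, []string{"http://127.0.0.1:2345/remote"})
	if err != nil {
		return err
	}

	// Serves GET /remote/stat/{id}, GET /remote/{type}/{id}, DELETE /remote/{type}/{id}.
	return http.ListenAndServe("127.0.0.1:2345", &FetchHandler{Local: local})
}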
@ -1,319 +0,0 @@
package stores

import (
	"context"
	"net/url"
	gopath "path"
	"sort"
	"sync"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/abi/big"
)

// ID identifies sector storage by UUID. One sector storage should map to one
// filesystem, local or networked / shared by multiple machines
type ID string

type StorageInfo struct {
	ID     ID
	URLs   []string // TODO: Support non-http transports
	Weight uint64

	CanSeal  bool
	CanStore bool
}

type SectorIndex interface { // part of storage-miner api
	StorageAttach(context.Context, StorageInfo, FsStat) error
	StorageInfo(context.Context, ID) (StorageInfo, error)
	// TODO: StorageUpdateStats(FsStat)

	StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error
	StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error
	StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error)

	StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error)
}

type Decl struct {
	abi.SectorID
	SectorFileType
}

type storageEntry struct {
	info *StorageInfo
	fsi  FsStat
}

type Index struct {
	lk sync.RWMutex

	sectors map[Decl][]ID
	stores  map[ID]*storageEntry
}

func NewIndex() *Index {
	return &Index{
		sectors: map[Decl][]ID{},
		stores:  map[ID]*storageEntry{},
	}
}

func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
	byID := map[ID]map[abi.SectorID]SectorFileType{}

	for id := range i.stores {
		byID[id] = map[abi.SectorID]SectorFileType{}
	}
	for decl, ids := range i.sectors {
		for _, id := range ids {
			byID[id][decl.SectorID] |= decl.SectorFileType
		}
	}

	out := map[ID][]Decl{}
	for id, m := range byID {
		out[id] = []Decl{}
		for sectorID, fileType := range m {
			out[id] = append(out[id], Decl{
				SectorID:       sectorID,
				SectorFileType: fileType,
			})
		}
	}

	return out, nil
}

func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st FsStat) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	log.Infof("New sector storage: %s", si.ID)

	if _, ok := i.stores[si.ID]; ok {
		for _, u := range si.URLs {
			if _, err := url.Parse(u); err != nil {
				return xerrors.Errorf("failed to parse url %s: %w", si.URLs, err)
			}
		}

	uloop:
		for _, u := range si.URLs {
			for _, l := range i.stores[si.ID].info.URLs {
				if u == l {
					continue uloop
				}
			}

			i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u)
		}

		return nil
	}
	i.stores[si.ID] = &storageEntry{
		info: &si,
		fsi:  st,
	}
	return nil
}

func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	for _, fileType := range PathTypes {
		if fileType&ft == 0 {
			continue
		}

		d := Decl{s, fileType}

		for _, sid := range i.sectors[d] {
			if sid == storageId {
				log.Warnf("sector %v redeclared in %s", s, storageId)
				return nil
			}
		}

		i.sectors[d] = append(i.sectors[d], storageId)
	}

	return nil
}

func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error {
	i.lk.Lock()
	defer i.lk.Unlock()

	for _, fileType := range PathTypes {
		if fileType&ft == 0 {
			continue
		}

		d := Decl{s, fileType}

		if len(i.sectors[d]) == 0 {
			return nil
		}

		rewritten := make([]ID, 0, len(i.sectors[d])-1)
		for _, sid := range i.sectors[d] {
			if sid == storageId {
				continue
			}

			rewritten = append(rewritten, sid)
		}
		if len(rewritten) == 0 {
			delete(i.sectors, d)
			return nil
		}

		i.sectors[d] = rewritten
	}

	return nil
}

func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, allowFetch bool) ([]StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	storageIDs := map[ID]uint64{}

	for _, pathType := range PathTypes {
		if ft&pathType == 0 {
			continue
		}

		for _, id := range i.sectors[Decl{s, pathType}] {
			storageIDs[id]++
		}
	}

	out := make([]StorageInfo, 0, len(storageIDs))

	for id, n := range storageIDs {
		st, ok := i.stores[id]
		if !ok {
			log.Warnf("storage %s is not present in sector index (referenced by sector %v)", id, s)
			continue
		}

		urls := make([]string, len(st.info.URLs))
		for k, u := range st.info.URLs {
			rl, err := url.Parse(u)
			if err != nil {
				return nil, xerrors.Errorf("failed to parse url: %w", err)
			}

			rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
			urls[k] = rl.String()
		}

		out = append(out, StorageInfo{
			ID:       id,
			URLs:     urls,
			Weight:   st.info.Weight * n, // storage with more sector types is better
			CanSeal:  st.info.CanSeal,
			CanStore: st.info.CanStore,
		})
	}

	if allowFetch {
		for id, st := range i.stores {
			if _, ok := storageIDs[id]; ok {
				continue
			}

			urls := make([]string, len(st.info.URLs))
			for k, u := range st.info.URLs {
				rl, err := url.Parse(u)
				if err != nil {
					return nil, xerrors.Errorf("failed to parse url: %w", err)
				}

				rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
				urls[k] = rl.String()
			}

			out = append(out, StorageInfo{
				ID:       id,
				URLs:     urls,
				Weight:   st.info.Weight * 0, // TODO: something better than just '0'
				CanSeal:  st.info.CanSeal,
				CanStore: st.info.CanStore,
			})
		}
	}

	return out, nil
}

func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	si, found := i.stores[id]
	if !found {
		return StorageInfo{}, xerrors.Errorf("sector store not found")
	}

	return *si.info, nil
}

func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, sealing bool) ([]StorageInfo, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	var candidates []storageEntry

	for _, p := range i.stores {
		if sealing && !p.info.CanSeal {
			log.Debugf("alloc: not considering %s; can't seal", p.info.ID)
			continue
		}
		if !sealing && !p.info.CanStore {
			log.Debugf("alloc: not considering %s; can't store", p.info.ID)
			continue
		}

		// TODO: filter out of space

		candidates = append(candidates, *p)
	}

	if len(candidates) == 0 {
		return nil, xerrors.New("no good path found")
	}

	sort.Slice(candidates, func(i, j int) bool {
		iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight)))
		jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight)))

		return iw.GreaterThan(jw)
	})

	out := make([]StorageInfo, len(candidates))
	for i, candidate := range candidates {
		out[i] = *candidate.info
	}

	return out, nil
}

func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) {
	i.lk.RLock()
	defer i.lk.RUnlock()

	return i.sectors[Decl{
		SectorID:       id,
		SectorFileType: typ,
	}], nil
}

var _ SectorIndex = &Index{}
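A minimal sketch of the index lifecycle (not part of this diff; the ID, URLs, and sizes are made up, and fmt is assumed imported):

func sketchIndex(ctx context.Context) {
	idx := NewIndex()

	_ = idx.StorageAttach(ctx, StorageInfo{
		ID:      "b1f41fa9-example-uuid",
		URLs:    []string{"http://127.0.0.1:2345/remote"},
		Weight:  10,
		CanSeal: true,
	}, FsStat{Capacity: 1 << 40, Available: 1 << 40})

	s := abi.SectorID{Miner: 1000, Number: 7}
	_ = idx.StorageDeclareSector(ctx, "b1f41fa9-example-uuid", s, FTSealed)

	// Finds the store above; with allowFetch=true it would also list
	// stores the sector could be fetched into, at Weight 0.
	infos, _ := idx.StorageFindSector(ctx, s, FTSealed, false)
	fmt.Println(len(infos)) // 1
}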
@ -1,38 +0,0 @@
|
||||
package stores
|
||||
|
||||
import (
|
||||
"context"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||
)
|
||||
|
||||
type Store interface {
|
||||
AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (paths SectorPaths, stores SectorPaths, done func(), err error)
|
||||
Remove(ctx context.Context, s abi.SectorID, types SectorFileType) error
|
||||
|
||||
// move sectors into storage
|
||||
MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error
|
||||
|
||||
FsStat(ctx context.Context, id ID) (FsStat, error)
|
||||
}
|
||||
|
||||
func Stat(path string) (FsStat, error) {
|
||||
var stat syscall.Statfs_t
|
||||
if err := syscall.Statfs(path, &stat); err != nil {
|
||||
return FsStat{}, xerrors.Errorf("statfs: %w", err)
|
||||
}
|
||||
|
||||
return FsStat{
|
||||
Capacity: stat.Blocks * uint64(stat.Bsize),
|
||||
Available: stat.Bavail * uint64(stat.Bsize),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type FsStat struct {
|
||||
Capacity uint64
|
||||
Available uint64 // Available to use for sector storage
|
||||
Used uint64
|
||||
}
|
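Usage sketch for Stat (not part of this diff; the path is illustrative and fmt is assumed imported). Capacity and Available come straight from statfs block counts times the block size.

func sketchStat() error {
	st, err := Stat("/srv/lotus-storage") // path is illustrative
	if err != nil {
		return err
	}
	fmt.Printf("capacity: %d GiB, available: %d GiB\n", st.Capacity>>30, st.Available>>30)
	return nil
}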
@ -1,389 +0,0 @@
package stores

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"math/bits"
	"os"
	"path/filepath"
	"sync"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/specs-actors/actors/abi"
)

type StoragePath struct {
	ID     ID
	Weight uint64

	LocalPath string

	CanSeal  bool
	CanStore bool
}

// [path]/sectorstore.json
type LocalStorageMeta struct {
	ID     ID
	Weight uint64 // 0 = readonly

	CanSeal  bool
	CanStore bool
}

// .lotusstorage/storage.json
type StorageConfig struct {
	StoragePaths []LocalPath
}

type LocalPath struct {
	Path string
}

type LocalStorage interface {
	GetStorage() (StorageConfig, error)
	SetStorage(func(*StorageConfig)) error
}

const MetaFile = "sectorstore.json"

var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}

type Local struct {
	localStorage LocalStorage
	index        SectorIndex
	urls         []string

	paths map[ID]*path

	localLk sync.RWMutex
}

type path struct {
	local string // absolute local path
}

func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
	l := &Local{
		localStorage: ls,
		index:        index,
		urls:         urls,

		paths: map[ID]*path{},
	}
	return l, l.open(ctx)
}

func (st *Local) OpenPath(ctx context.Context, p string) error {
	st.localLk.Lock()
	defer st.localLk.Unlock()

	mb, err := ioutil.ReadFile(filepath.Join(p, MetaFile))
	if err != nil {
		return xerrors.Errorf("reading storage metadata for %s: %w", p, err)
	}

	var meta LocalStorageMeta
	if err := json.Unmarshal(mb, &meta); err != nil {
		return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p, err)
	}

	// TODO: Check existing / dedupe

	out := &path{
		local: p,
	}

	fst, err := Stat(p)
	if err != nil {
		return err
	}

	err = st.index.StorageAttach(ctx, StorageInfo{
		ID:       meta.ID,
		URLs:     st.urls,
		Weight:   meta.Weight,
		CanSeal:  meta.CanSeal,
		CanStore: meta.CanStore,
	}, fst)
	if err != nil {
		return xerrors.Errorf("declaring storage in index: %w", err)
	}

	for _, t := range PathTypes {
		ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
		if err != nil {
			if os.IsNotExist(err) {
				if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil {
					return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
				}

				continue
			}
			return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
		}

		for _, ent := range ents {
			sid, err := ParseSectorID(ent.Name())
			if err != nil {
				return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
			}

			if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t); err != nil {
				return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err)
			}
		}
	}

	st.paths[meta.ID] = out

	return nil
}

func (st *Local) open(ctx context.Context) error {
	cfg, err := st.localStorage.GetStorage()
	if err != nil {
		return xerrors.Errorf("getting local storage config: %w", err)
	}

	for _, path := range cfg.StoragePaths {
		err := st.OpenPath(ctx, path.Path)
		if err != nil {
			return xerrors.Errorf("opening path %s: %w", path.Path, err)
		}
	}

	return nil
}

func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) {
	if existing|allocate != existing^allocate {
		return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector")
	}

	st.localLk.RLock()

	var out SectorPaths
	var storageIDs SectorPaths

	for _, fileType := range PathTypes {
		if fileType&existing == 0 {
			continue
		}

		si, err := st.index.StorageFindSector(ctx, sid, fileType, false)
		if err != nil {
			log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err)
			continue
		}

		for _, info := range si {
			p, ok := st.paths[info.ID]
			if !ok {
				continue
			}

			if p.local == "" { // TODO: can that even be the case?
				continue
			}

			spath := filepath.Join(p.local, fileType.String(), SectorName(sid))
			SetPathByType(&out, fileType, spath)
			SetPathByType(&storageIDs, fileType, string(info.ID))

			existing ^= fileType
			break
		}
	}

	for _, fileType := range PathTypes {
		if fileType&allocate == 0 {
			continue
		}

		sis, err := st.index.StorageBestAlloc(ctx, fileType, sealing)
		if err != nil {
			st.localLk.RUnlock()
			return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("finding best storage for allocating : %w", err)
		}

		var best string
		var bestID ID

		for _, si := range sis {
			p, ok := st.paths[si.ID]
			if !ok {
				continue
			}

			if p.local == "" { // TODO: can that even be the case?
				continue
			}

			if sealing && !si.CanSeal {
				continue
			}

			if !sealing && !si.CanStore {
				continue
			}

			// TODO: Check free space

			best = filepath.Join(p.local, fileType.String(), SectorName(sid))
			bestID = si.ID
		}

		if best == "" {
			st.localLk.RUnlock()
			return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("couldn't find a suitable path for a sector")
		}

		SetPathByType(&out, fileType, best)
		SetPathByType(&storageIDs, fileType, string(bestID))
		allocate ^= fileType
	}

	return out, storageIDs, st.localLk.RUnlock, nil
}

func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
	st.localLk.RLock()
	defer st.localLk.RUnlock()

	var out []StoragePath
	for id, p := range st.paths {
		if p.local == "" {
			continue
		}

		si, err := st.index.StorageInfo(ctx, id)
		if err != nil {
			return nil, xerrors.Errorf("get storage info for %s: %w", id, err)
		}

		out = append(out, StoragePath{
			ID:        id,
			Weight:    si.Weight,
			LocalPath: p.local,
			CanSeal:   si.CanSeal,
			CanStore:  si.CanStore,
		})
	}

	return out, nil
}

func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error {
	if bits.OnesCount(uint(typ)) != 1 {
		return xerrors.New("delete expects one file type")
	}

	si, err := st.index.StorageFindSector(ctx, sid, typ, false)
	if err != nil {
		return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
	}

	if len(si) == 0 {
		return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ)
	}

	for _, info := range si {
		p, ok := st.paths[info.ID]
		if !ok {
			continue
		}

		if p.local == "" { // TODO: can that even be the case?
			continue
		}

		if err := st.index.StorageDropSector(ctx, info.ID, sid, typ); err != nil {
			return xerrors.Errorf("dropping sector from index: %w", err)
		}

		spath := filepath.Join(p.local, typ.String(), SectorName(sid))
		log.Infof("remove %s", spath)

		if err := os.RemoveAll(spath); err != nil {
			log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err)
		}
	}

	return nil
}

func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error {
	dest, destIds, sdone, err := st.AcquireSector(ctx, s, FTNone, types, false)
	if err != nil {
		return xerrors.Errorf("acquire dest storage: %w", err)
	}
	defer sdone()

	src, srcIds, ddone, err := st.AcquireSector(ctx, s, types, FTNone, false)
	if err != nil {
		return xerrors.Errorf("acquire src storage: %w", err)
	}
	defer ddone()

	for _, fileType := range PathTypes {
		if fileType&types == 0 {
			continue
		}

		sst, err := st.index.StorageInfo(ctx, ID(PathByType(srcIds, fileType)))
		if err != nil {
			return xerrors.Errorf("failed to get source storage info: %w", err)
		}

		dst, err := st.index.StorageInfo(ctx, ID(PathByType(destIds, fileType)))
		if err != nil {
			return xerrors.Errorf("failed to get destination storage info: %w", err)
		}

		if sst.ID == dst.ID {
			log.Debugf("not moving %v(%d); src and dest are the same", s, fileType)
			continue
		}

		if sst.CanStore {
			log.Debugf("not moving %v(%d); source supports storage", s, fileType)
			continue
		}

		log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)

		if err := st.index.StorageDropSector(ctx, ID(PathByType(srcIds, fileType)), s, fileType); err != nil {
			return xerrors.Errorf("dropping source sector from index: %w", err)
		}

		if err := move(PathByType(src, fileType), PathByType(dest, fileType)); err != nil {
			// TODO: attempt some recovery (check if src is still there, re-declare)
			return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
		}

		if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType); err != nil {
			return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err)
		}
	}

	return nil
}

var errPathNotFound = xerrors.Errorf("fsstat: path not found")

func (st *Local) FsStat(ctx context.Context, id ID) (FsStat, error) {
	st.localLk.RLock()
	defer st.localLk.RUnlock()

	p, ok := st.paths[id]
	if !ok {
		return FsStat{}, errPathNotFound
	}

	return Stat(p.local)
}

var _ Store = &Local{}
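Sketch of producing the sectorstore.json metadata that OpenPath expects (not part of this diff; the ID and weight values are illustrative, and the function name is hypothetical):

func sketchWriteMeta(p string) error {
	meta := LocalStorageMeta{
		ID:       ID("b1f41fa9-example-uuid"),
		Weight:   10,
		CanSeal:  true,
		CanStore: false,
	}

	b, err := json.MarshalIndent(&meta, "", "  ")
	if err != nil {
		return err
	}

	// OpenPath reads this file from [path]/sectorstore.json.
	return ioutil.WriteFile(filepath.Join(p, MetaFile), b, 0644)
}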
@ -1,307 +0,0 @@
|
||||
package stores
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"math/bits"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
gopath "path"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/storage/sectorstorage/tarutil"
|
||||
)
|
||||
|
||||
type Remote struct {
|
||||
local *Local
|
||||
index SectorIndex
|
||||
auth http.Header
|
||||
|
||||
fetchLk sync.Mutex // TODO: this can be much smarter
|
||||
// TODO: allow multiple parallel fetches
|
||||
// (make sure to not fetch the same sector data twice)
|
||||
}
|
||||
|
||||
func NewRemote(local *Local, index SectorIndex, auth http.Header) *Remote {
|
||||
return &Remote{
|
||||
local: local,
|
||||
index: index,
|
||||
auth: auth,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, existing SectorFileType, allocate SectorFileType, sealing bool) (SectorPaths, SectorPaths, func(), error) {
|
||||
if existing|allocate != existing^allocate {
|
||||
return SectorPaths{}, SectorPaths{}, nil, xerrors.New("can't both find and allocate a sector")
|
||||
}
|
||||
|
||||
r.fetchLk.Lock()
|
||||
defer r.fetchLk.Unlock()
|
||||
|
||||
paths, stores, done, err := r.local.AcquireSector(ctx, s, existing, allocate, sealing)
|
||||
if err != nil {
|
||||
return SectorPaths{}, SectorPaths{}, nil, xerrors.Errorf("local acquire error: %w", err)
|
||||
}
|
||||
|
||||
for _, fileType := range PathTypes {
|
||||
if fileType&existing == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if PathByType(paths, fileType) != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ap, storageID, url, rdone, err := r.acquireFromRemote(ctx, s, fileType, sealing)
|
||||
if err != nil {
|
||||
done()
|
||||
return SectorPaths{}, SectorPaths{}, nil, err
|
||||
}
|
||||
|
||||
done = mergeDone(done, rdone)
|
||||
SetPathByType(&paths, fileType, ap)
|
||||
SetPathByType(&stores, fileType, string(storageID))
|
||||
|
||||
if err := r.index.StorageDeclareSector(ctx, storageID, s, fileType); err != nil {
|
||||
log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO: some way to allow having duplicated sectors in the system for perf
|
||||
if err := r.deleteFromRemote(ctx, url); err != nil {
|
||||
log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err)
|
||||
}
|
||||
}
|
||||
|
||||
return paths, stores, done, nil
|
||||
}
|
||||
|
||||
func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, sealing bool) (string, ID, string, func(), error) {
|
||||
si, err := r.index.StorageFindSector(ctx, s, fileType, false)
|
||||
if err != nil {
|
||||
return "", "", "", nil, err
|
||||
}
|
||||
|
||||
sort.Slice(si, func(i, j int) bool {
|
||||
return si[i].Weight < si[j].Weight
|
||||
})
|
||||
|
||||
apaths, ids, done, err := r.local.AcquireSector(ctx, s, FTNone, fileType, sealing)
|
||||
if err != nil {
|
||||
return "", "", "", nil, xerrors.Errorf("allocate local sector for fetching: %w", err)
|
||||
}
|
||||
dest := PathByType(apaths, fileType)
|
||||
storageID := PathByType(ids, fileType)
|
||||
|
||||
var merr error
|
||||
for _, info := range si {
|
||||
// TODO: see what we have local, prefer that
|
||||
|
||||
for _, url := range info.URLs {
|
||||
err := r.fetch(ctx, url, dest)
|
||||
if err != nil {
|
||||
merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, dest, err))
|
||||
continue
|
||||
}
|
||||
|
||||
if merr != nil {
|
||||
log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr)
|
||||
}
|
||||
return dest, ID(storageID), url, done, nil
|
||||
}
|
||||
}
|
||||
|
||||
done()
|
||||
return "", "", "", nil, xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr)
|
||||
}
|
||||
|
||||
func (r *Remote) fetch(ctx context.Context, url, outname string) error {
|
||||
log.Infof("Fetch %s -> %s", url, outname)
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("request: %w", err)
|
||||
}
|
||||
req.Header = r.auth
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("do request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
/*bar := pb.New64(w.sizeForType(typ))
|
||||
bar.ShowPercent = true
|
||||
bar.ShowSpeed = true
|
||||
bar.Units = pb.U_BYTES
|
||||
|
||||
barreader := bar.NewProxyReader(resp.Body)
|
||||
|
||||
bar.Start()
|
||||
defer bar.Finish()*/
|
||||
|
||||
mediatype, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse media type: %w", err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(outname); err != nil {
|
||||
return xerrors.Errorf("removing dest: %w", err)
|
||||
}
|
||||
|
||||
switch mediatype {
|
||||
case "application/x-tar":
|
||||
return tarutil.ExtractTar(resp.Body, outname)
|
||||
case "application/octet-stream":
|
||||
return files.WriteTo(files.NewReaderFile(resp.Body), outname)
|
||||
default:
|
||||
return xerrors.Errorf("unknown content type: '%s'", mediatype)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, types SectorFileType) error {
	// Make sure we have the data local
	_, _, ddone, err := r.AcquireSector(ctx, s, types, FTNone, false)
	if err != nil {
		return xerrors.Errorf("acquire src storage (remote): %w", err)
	}
	ddone()

	return r.local.MoveStorage(ctx, s, types)
}

func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType) error {
	if bits.OnesCount(uint(typ)) != 1 {
		return xerrors.New("delete expects one file type")
	}

	if err := r.local.Remove(ctx, sid, typ); err != nil {
		return xerrors.Errorf("remove from local: %w", err)
	}

	si, err := r.index.StorageFindSector(ctx, sid, typ, false)
	if err != nil {
		return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
	}

	// Best effort: for each storage holding the file, try its URLs in order,
	// stopping at the first successful delete; log and move on otherwise.
	for _, info := range si {
		for _, url := range info.URLs {
			if err := r.deleteFromRemote(ctx, url); err != nil {
				log.Warnf("remove %s: %+v", url, err)
				continue
			}
			break
		}
	}

	return nil
}

func (r *Remote) deleteFromRemote(ctx context.Context, url string) error {
	log.Infof("Delete %s", url)

	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return xerrors.Errorf("request: %w", err)
	}
	req.Header = r.auth
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return xerrors.Errorf("do request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
	}

	return nil
}

func (r *Remote) FsStat(ctx context.Context, id ID) (FsStat, error) {
	st, err := r.local.FsStat(ctx, id)
	switch err {
	case nil:
		return st, nil
	case errPathNotFound:
		break
	default:
		return FsStat{}, xerrors.Errorf("local stat: %w", err)
	}

	si, err := r.index.StorageInfo(ctx, id)
	if err != nil {
		return FsStat{}, xerrors.Errorf("getting remote storage info: %w", err)
	}

	if len(si.URLs) == 0 {
		return FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
	}

	rl, err := url.Parse(si.URLs[0])
	if err != nil {
		return FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
	}

	rl.Path = gopath.Join(rl.Path, "stat", string(id))

	req, err := http.NewRequest("GET", rl.String(), nil)
	if err != nil {
		return FsStat{}, xerrors.Errorf("request: %w", err)
	}
	req.Header = r.auth
	req = req.WithContext(ctx)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return FsStat{}, xerrors.Errorf("do request: %w", err)
	}
	// Close the body on every return path, including the 404/500 cases below.
	defer resp.Body.Close()

	switch resp.StatusCode {
	case 200:
		break
	case 404:
		return FsStat{}, errPathNotFound
	case 500:
		b, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
		}

		return FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
	}

	var out FsStat
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
	}

	return out, nil
}

func mergeDone(a func(), b func()) func() {
	return func() {
		a()
		b()
	}
}

var _ Store = &Remote{}
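On a local miss, FsStat falls back to GET <storage-url>/stat/<id> and decodes plain JSON, so the client side reduces to a json.Decode. A minimal decode sketch — the fsStat mirror type and its Capacity/Available fields are assumptions for illustration, not the canonical FsStat definition:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// fsStat stands in for stores.FsStat; the field names are assumed here.
type fsStat struct {
	Capacity  int64
	Available int64
}

func main() {
	// What a remote storage node might serve from GET <url>/stat/<storage-id>.
	body := strings.NewReader(`{"Capacity": 1099511627776, "Available": 549755813888}`)

	var out fsStat
	if err := json.NewDecoder(body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Capacity, out.Available) // 1099511627776 549755813888
}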
@ -1,43 +0,0 @@
package stores

import (
	"bytes"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/mitchellh/go-homedir"
	"golang.org/x/xerrors"
)

func move(from, to string) error {
	from, err := homedir.Expand(from)
	if err != nil {
		return xerrors.Errorf("move: expanding from: %w", err)
	}

	to, err = homedir.Expand(to)
	if err != nil {
		return xerrors.Errorf("move: expanding to: %w", err)
	}

	if filepath.Base(from) != filepath.Base(to) {
		return xerrors.Errorf("move: base names must match ('%s' != '%s')", filepath.Base(from), filepath.Base(to))
	}

	log.Debugw("move sector data", "from", from, "to", to)

	toDir := filepath.Dir(to)

	// `mv` has decades of experience in moving files quickly; don't pretend we
	// can do better
	var errOut bytes.Buffer
	cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from)
	cmd.Stderr = &errOut
	if err := cmd.Run(); err != nil {
		return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err)
	}

	return nil
}
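To make the base-name invariant concrete, a hypothetical in-package call (both paths are invented): the sector file keeps its name, and only the parent storage path changes.

// Hypothetical usage inside package stores; move rejects the call if the
// base names of the two paths differ.
func relocateExample() error {
	return move("~/.lotus-a/sealed/s-t01000-0", "/mnt/long-term/sealed/s-t01000-0")
}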
@ -1,92 +0,0 @@
package tarutil

import (
	"archive/tar"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"
)

var log = logging.Logger("tarutil")

func ExtractTar(body io.Reader, dir string) error {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return xerrors.Errorf("mkdir: %w", err)
	}

	tr := tar.NewReader(body)
	for {
		header, err := tr.Next()
		switch err {
		default:
			return err
		case io.EOF:
			return nil

		case nil:
		}

		f, err := os.Create(filepath.Join(dir, header.Name))
		if err != nil {
			return xerrors.Errorf("creating file %s: %w", filepath.Join(dir, header.Name), err)
		}

		if _, err := io.Copy(f, tr); err != nil {
			return err
		}

		if err := f.Close(); err != nil {
			return err
		}
	}
}

func TarDirectory(dir string) (io.ReadCloser, error) {
	r, w := io.Pipe()

	go func() {
		_ = w.CloseWithError(writeTarDirectory(dir, w))
	}()

	return r, nil
}

func writeTarDirectory(dir string, w io.Writer) error {
	tw := tar.NewWriter(w)

	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}

	for _, file := range files {
		h, err := tar.FileInfoHeader(file, "")
		if err != nil {
			return xerrors.Errorf("getting header for file %s: %w", file.Name(), err)
		}

		if err := tw.WriteHeader(h); err != nil {
			return xerrors.Errorf("writing header for file %s: %w", file.Name(), err)
		}

		f, err := os.OpenFile(filepath.Join(dir, file.Name()), os.O_RDONLY, 0644) // octal file mode
		if err != nil {
			return xerrors.Errorf("opening %s for reading: %w", file.Name(), err)
		}

		if _, err := io.Copy(tw, f); err != nil {
			return xerrors.Errorf("copy data for file %s: %w", file.Name(), err)
		}

		if err := f.Close(); err != nil {
			return err
		}
	}

	// Close flushes buffered data and writes the trailing tar footer blocks.
	return tw.Close()
}
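A round-trip usage sketch tying TarDirectory to ExtractTar — effectively what a sector fetch does over HTTP (assuming the package lands at github.com/filecoin-project/sector-storage/tarutil after this extraction; directory names are invented):

package main

import (
	"log"

	"github.com/filecoin-project/sector-storage/tarutil"
)

func main() {
	// Stream a cache directory as a tar and unpack it somewhere else.
	rc, err := tarutil.TarDirectory("./cache/s-t01000-0")
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	if err := tarutil.ExtractTar(rc, "./fetched/s-t01000-0"); err != nil {
		log.Fatal(err)
	}
}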
@ -1,55 +0,0 @@
package zerocomm

import (
	"math/bits"

	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/ipfs/go-cid"
)

const Levels = 37
const Skip = 2 // can't generate for 32, 64b

var PieceComms = [Levels - Skip][32]byte{
	{0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33},
	{0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf},
	{0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26},
	{0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f},
	{0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33},
	{0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24},
	{0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f},
	{0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8},
	{0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11},
	{0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2},
	{0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f},
	{0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7},
	{0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19},
	{0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b},
	{0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26},
	{0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11},
	{0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c},
	{0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a},
	{0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8},
	{0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6},
	{0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f},
	{0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37},
	{0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6},
	{0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d},
	{0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c},
	{0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7},
	{0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1},
	{0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c},
	{0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e},
	{0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2},
	{0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d},
	{0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39},
	{0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13},
	{0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19},
	{0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32},
}

func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid {
	level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32
	return commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
}
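To make the indexing concrete: a 1016-byte unpadded piece pads to 1016 + 1016/127 = 1024 = 2^10 bytes, so level = 10 - Skip - 5 = 3, i.e. PieceComms[3]. A standalone sketch of the arithmetic, recomputing Padded() by hand instead of importing abi:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const skip = 2 // mirrors zerocomm.Skip

	unpadded := uint64(1016)          // 8 chunks of 127 data bytes
	padded := unpadded + unpadded/127 // Padded() adds one byte per 127 -> 1024

	// 2^5 = 32 bytes is the merkle leaf size, hence the extra -5.
	level := bits.TrailingZeros64(padded) - skip - 5
	fmt.Println(padded, level) // 1024 3
}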
@ -1,115 +0,0 @@
package zerocomm_test

import (
	"bytes"
	"fmt"
	"io"
	"testing"

	commcid "github.com/filecoin-project/go-fil-commcid"
	abi "github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/storage/sectorstorage/ffiwrapper"
	"github.com/filecoin-project/lotus/storage/sectorstorage/zerocomm"
)

func TestComms(t *testing.T) {
	t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp

	var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid

	{
		l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127)
		if err != nil {
			t.Fatal(err)
		}
		expPieceComms[0] = l2
	}

	for i := 1; i < zerocomm.Levels-2; i++ {
		var err error
		sz := abi.UnpaddedPieceSize(127 << uint(i))
		fmt.Println(i, sz)
		r := io.LimitReader(&NullReader{}, int64(sz))

		expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz)
		if err != nil {
			t.Fatal(err)
		}
	}

	for i, comm := range expPieceComms {
		c, err := commcid.CIDToPieceCommitmentV1(comm)
		if err != nil {
			t.Fatal(err)
		}
		if string(c) != string(zerocomm.PieceComms[i][:]) {
			t.Errorf("zero commitment %d didn't match", i)
		}
	}

	for _, comm := range expPieceComms { // Could do codegen, but this is good enough
		fmt.Printf("%#v,\n", comm)
	}
}

func TestCommsSmall(t *testing.T) {
	var expPieceComms [8]cid.Cid
	lvls := len(expPieceComms) + zerocomm.Skip

	{
		l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 127)), 127)
		if err != nil {
			t.Fatal(err)
		}
		expPieceComms[0] = l2
	}

	for i := 1; i < lvls-2; i++ {
		var err error
		sz := abi.UnpaddedPieceSize(127 << uint(i))
		fmt.Println(i, sz)
		r := io.LimitReader(&NullReader{}, int64(sz))

		expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, r, sz)
		if err != nil {
			t.Fatal(err)
		}
	}

	for i, comm := range expPieceComms {
		c, err := commcid.CIDToPieceCommitmentV1(comm)
		if err != nil {
			t.Fatal(err)
		}
		if string(c) != string(zerocomm.PieceComms[i][:]) {
			t.Errorf("zero commitment %d didn't match", i)
		}
	}

	for _, comm := range expPieceComms { // Could do codegen, but this is good enough
		fmt.Printf("%#v,\n", comm)
	}
}

func TestForSize(t *testing.T) {
	exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredProof_StackedDRG2KiBPoSt, bytes.NewReader(make([]byte, 1016)), 1016)
	if err != nil {
		t.Fatal(err) // failing to generate the reference CID must fail the test, not silently pass
	}

	actual := zerocomm.ZeroPieceCommitment(1016)
	if !exp.Equals(actual) {
		t.Errorf("zero commitment didn't match")
	}
}

type NullReader struct{}

func (NullReader) Read(out []byte) (int, error) {
	for i := range out {
		out[i] = 0
	}
	return len(out), nil
}