Merge pull request #2980 from filecoin-project/next

Merge latest next into ntwk-calibration
Łukasz Magiera 2020-08-11 12:27:42 +02:00 committed by GitHub
commit 99de61a6f8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
131 changed files with 15961 additions and 122 deletions


@@ -122,10 +122,10 @@ type FullNode interface {
// It fails if message fails to execute.
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error)
// GasEsitmateGasPremium estimates what gas price should be used for a
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEsitmateGasPremium(_ context.Context, nblocksincl uint64,
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
// MethodGroup: Sync

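For reference, a minimal usage sketch of the renamed method, assuming `api` is a connected FullNode client and `ctx` and `sender` are a valid context and a funded address (all assumed, not part of this diff):

premium, err := api.GasEstimateGasPremium(ctx, 2, sender, 10000, types.EmptyTSK)
if err != nil {
    return err
}
fmt.Printf("suggested GasPremium for inclusion within ~2 epochs: %s\n", premium)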

@@ -87,7 +87,7 @@ type FullNodeStruct struct {
BeaconGetEntry func(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
GasEsitmateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
GasEstimateGasPremium func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
GasEstimateGasLimit func(context.Context, *types.Message, types.TipSetKey) (int64, error) `perm:"read"`
GasEstimateFeeCap func(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
@@ -257,7 +257,7 @@ type StorageMinerStruct struct {
StorageAttach func(context.Context, stores.StorageInfo, fsutil.FsStat) error `perm:"admin"`
StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"`
StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"`
StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, abi.RegisteredSealProof, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"`
StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, sealing stores.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"`
@@ -434,9 +434,9 @@ func (c *FullNodeStruct) ClientDealSize(ctx context.Context, root cid.Cid) (api.
return c.Internal.ClientDealSize(ctx, root)
}
func (c *FullNodeStruct) GasEsitmateGasPremium(ctx context.Context, nblocksincl uint64,
func (c *FullNodeStruct) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.GasEsitmateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
return c.Internal.GasEstimateGasPremium(ctx, nblocksincl, sender, gaslimit, tsk)
}
func (c *FullNodeStruct) GasEstimateFeeCap(ctx context.Context, msg *types.Message,
maxqueueblks int64, tsk types.TipSetKey) (types.BigInt, error) {
@@ -980,8 +980,8 @@ func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId st
return c.Internal.StorageDropSector(ctx, storageId, s, ft)
}
func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, allowFetch bool) ([]stores.SectorStorageInfo, error) {
return c.Internal.StorageFindSector(ctx, si, types, allowFetch)
func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]stores.SectorStorageInfo, error) {
return c.Internal.StorageFindSector(ctx, si, types, spt, allowFetch)
}
func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {


@@ -44,16 +44,11 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect
mine := true
done := make(chan struct{})
blockNotif := make(chan struct{}, 1)
go func() {
defer close(done)
for mine {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, error) {
select {
case blockNotif <- struct{}{}:
default:
}
}}); err != nil {
t.Error(err)
@@ -61,7 +56,7 @@
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, blockNotif)
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
mine = false
<-done


@@ -25,7 +25,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
const BuildVersion = "0.4.3"
const BuildVersion = "0.4.4"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit
@@ -53,7 +53,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
}
// APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 9, 0)
var APIVersion Version = newVer(0, 10, 0)
//nolint:varcheck,deadcode
const (


@@ -393,7 +393,7 @@ func (client *BlockSync) sendRequestToPeer(
&res)
if err != nil {
client.peerTracker.logFailure(peer, build.Clock.Since(connectionStart))
return nil, err
return nil, xerrors.Errorf("failed to read blocksync response: %w", err)
}
// FIXME: Move all this together at the top using a defer as done elsewhere.


@@ -14,6 +14,7 @@ var (
MemPoolSizeLimitHiDefault = 30000
MemPoolSizeLimitLoDefault = 20000
PruneCooldownDefault = time.Minute
GasLimitOverestimation = 1.25
ConfigKey = datastore.NewKey("/mpool/config")
)
@@ -34,6 +35,10 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) {
}
cfg := new(types.MpoolConfig)
err = json.Unmarshal(cfgBytes, cfg)
if cfg.GasLimitOverestimation == 0 {
// TODO: remove in next reset
cfg.GasLimitOverestimation = GasLimitOverestimation
}
return cfg, err
}
@@ -69,5 +74,6 @@ func DefaultConfig() *types.MpoolConfig {
SizeLimitLow: MemPoolSizeLimitLoDefault,
ReplaceByFeeRatio: ReplaceByFeeRatioDefault,
PruneCooldown: PruneCooldownDefault,
GasLimitOverestimation: GasLimitOverestimation,
}
}
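The new GasLimitOverestimation value is a multiplicative safety margin on estimated gas limits. A minimal sketch of how such a factor would be applied (the helper is hypothetical, not part of this diff):

func overestimateGasLimit(estimated int64, cfg *types.MpoolConfig) int64 {
    // e.g. an estimate of 1,000,000 gas becomes a limit of 1,250,000 at 1.25
    return int64(float64(estimated) * cfg.GasLimitOverestimation)
}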


@@ -56,6 +56,7 @@ var (
ErrInvalidToAddr = errors.New("message had invalid to address")
ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail")
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
)
const (
@@ -135,8 +136,9 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool) (bool, error) {
} else {
log.Info("add with duplicate nonce")
return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+
" increase GasPremium to %s from %s to trigger replace by fee",
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium)
" increase GasPremium to %s from %s to trigger replace by fee: %w",
m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
ErrRBFTooLowPremium)
}
}
}
@@ -517,7 +519,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage) error {
incr, err := mset.add(m, mp)
if err != nil {
log.Info(err)
return err // TODO(review): this error return was dropped at some point, was it on purpose?
return err
}
if incr {

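Since the add path now wraps the ErrRBFTooLowPremium sentinel via %w, callers can match it; a sketch, assuming `err` came from an mpool add call:

if xerrors.Is(err, messagepool.ErrRBFTooLowPremium) {
    // premium too low to replace the queued message; ignore rather than reject
}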

@@ -59,7 +59,7 @@ func (mp *MessagePool) selectMessages(curTs, ts *types.TipSet) ([]*types.SignedM
// defer only here so if we have no pending messages we don't spam
defer func() {
log.Infof("message selection took %s", time.Since(start))
log.Infow("message selection done", "took", time.Since(start))
}()
// 0b. Select all priority messages that fit in the block
@@ -72,11 +72,13 @@ }
}
// 1. Create a list of dependent message chains with maximal gas reward per limit consumed
startChains := time.Now()
var chains []*msgChain
for actor, mset := range pending {
next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...)
}
log.Infow("create message chains done", "took", time.Since(startChains))
// 2. Sort the chains
sort.Slice(chains, func(i, j int) bool {
@@ -90,6 +92,7 @@ }
// 3. Merge the head chains to produce the list of messages selected for inclusion, subject to
// the block gas limit.
startMerge := time.Now()
last := len(chains)
for i, chain := range chains {
// does it fit in the block?
@@ -108,6 +111,7 @@ }
last = i
break
}
log.Infow("merge message chains done", "took", time.Since(startMerge))
// 4. We have reached the edge of what we can fit wholesale; if we still have available gasLimit
// to pack some more chains, then trim the last chain and push it down.
@@ -115,6 +119,7 @@ }
// dependency cannot be (fully) included.
// We do this in a loop because the blocker might have been inordinately large and we might
// have to do it multiple times to satisfy tail packing.
startTail := time.Now()
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim
@@ -157,11 +162,17 @@ tailLoop:
// -- mark the end.
last = len(chains)
}
log.Infow("pack tail chains done", "took", time.Since(startTail))
return result, nil
}
func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
start := time.Now()
defer func() {
log.Infow("select priority messages done", "took", time.Since(start))
}()
result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow)
gasLimit := int64(build.BlockGasLimit)
minGas := int64(gasguess.MinGas)
@@ -242,8 +253,15 @@ tailLoop:
}
func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) {
start := time.Now()
result := make(map[address.Address]map[uint64]*types.SignedMessage)
haveCids := make(map[cid.Cid]struct{})
defer func() {
if time.Since(start) > time.Millisecond {
log.Infow("get pending messages done", "took", time.Since(start))
}
}()
// are we in sync?
inSync := false

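The timing lines added above all repeat the same start/defer pattern; they could be factored into a small helper, sketched here hypothetically (not part of this diff):

func timed(what string) func() {
    start := time.Now()
    return func() { log.Infow(what+" done", "took", time.Since(start)) }
}

// usage: defer timed("message selection")()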

@@ -286,6 +286,78 @@ func TestMessageChains(t *testing.T) {
}
func TestMessageChainSkipping(t *testing.T) {
// regression test for chain skip bug
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeBLS)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeBLS)
if err != nil {
t.Fatal(err)
}
block := mock.MkBlock(nil, 1, 1)
ts := mock.TipSet(block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}]
baseFee := types.NewInt(0)
tma.setBalance(a1, 1) // in FIL
tma.setStateNonce(a1, 10)
mset := make(map[uint64]*types.SignedMessage)
for i := 0; i < 20; i++ {
bias := (20 - i) / 3
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias))
mset[uint64(i)] = m
}
chains := mp.createMessageChains(a1, mset, baseFee, ts)
if len(chains) != 4 {
t.Fatalf("expected 4 chains, got %d", len(chains))
}
for i, chain := range chains {
var expectedLen int
switch {
case i == 0:
expectedLen = 2
case i > 2:
expectedLen = 2
default:
expectedLen = 3
}
if len(chain.msgs) != expectedLen {
t.Fatalf("expected %d message in chain %d but got %d", expectedLen, i, len(chain.msgs))
}
}
nextNonce := 10
for _, chain := range chains {
for _, m := range chain.msgs {
if m.Message.Nonce != uint64(nextNonce) {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nextNonce++
}
}
}
func TestBasicMessageSelection(t *testing.T) {
mp, tma := makeTestMpool()
@@ -492,3 +564,82 @@ func TestMessageSelectionTrimming(t *testing.T) {
}
}
func TestPriorityMessageSelection(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.GenerateKey(crypto.SigTypeBLS)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.GenerateKey(crypto.SigTypeBLS)
if err != nil {
t.Fatal(err)
}
block := mock.MkBlock(nil, 1, 1)
ts := mock.TipSet(block)
tma.applyBlock(t, block)
gasLimit := gasguess.Costs[gasguess.CostKey{builtin.StorageMarketActorCodeID, 2}]
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
mp.cfg.PriorityAddrs = []address.Address{a1}
nMessages := 10
for i := 0; i < nMessages; i++ {
bias := (nMessages - i) / 3
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias))
mustAdd(t, mp, m)
m = makeTestMessage(w2, a2, a1, uint64(i), gasLimit, uint64(1+i%3+bias))
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(ts)
if err != nil {
t.Fatal(err)
}
if len(msgs) != 20 {
t.Fatalf("expected 20 messages but got %d", len(msgs))
}
// messages from a1 must be first
nextNonce := uint64(0)
for i := 0; i < 10; i++ {
m := msgs[i]
if m.Message.From != a1 {
t.Fatal("expected messages from a1 before messages from a2")
}
if m.Message.Nonce != nextNonce {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nextNonce++
}
nextNonce = 0
for i := 10; i < 20; i++ {
m := msgs[i]
if m.Message.From != a2 {
t.Fatal("expected messages from a2 after messages from a1")
}
if m.Message.Nonce != nextNonce {
t.Fatalf("expected nonce %d but got %d", nextNonce, m.Message.Nonce)
}
nextNonce++
}
}


@@ -108,7 +108,10 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
ts = sm.cs.GetHeaviestTipSet()
}
state := ts.ParentState()
state, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state: %w", err)
}
r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height())
@@ -122,7 +125,7 @@ }
vmopt := &vm.VMOpts{
StateBase: state,
Epoch: ts.Height(),
Epoch: ts.Height() + 1,
Rand: r,
Bstore: sm.cs.Blockstore(),
Syscalls: sm.cs.VMSys(),


@@ -510,7 +510,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
)
stats.Record(ctx, metrics.MessageValidationFailure.M(1))
switch {
case xerrors.Is(err, messagepool.ErrBroadcastAnyway):
case xerrors.Is(err, messagepool.ErrBroadcastAnyway) || xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
return pubsub.ValidationIgnore
default:
return pubsub.ValidationReject


@@ -12,6 +12,7 @@ type MpoolConfig struct {
SizeLimitLow int
ReplaceByFeeRatio float64
PruneCooldown time.Duration
GasLimitOverestimation float64
}
func (mc *MpoolConfig) Clone() *MpoolConfig {


@@ -29,12 +29,12 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
return 0, gasLimit
}
// over = gasLimit/gasUsed - 1 - 0.3
// over = gasLimit/gasUsed - 1 - 0.1
// over = min(over, 1)
// gasToBurn = (gasLimit - gasUsed) * over
// so to factor out division from `over`
// over*gasUsed = min(gasLimit - (13*gasUsed)/10, gasUsed)
// over*gasUsed = min(gasLimit - (11*gasUsed)/10, gasUsed)
// gasToBurn = ((gasLimit - gasUsed)*over*gasUsed) / gasUsed
over := gasLimit - (gasOveruseNum*gasUsed)/gasOveruseDenom
if over < 0 {

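A worked example with assumed numbers: for gasUsed = 1000 and gasLimit = 1500, the new 0.1 slack gives over = 1500 - (11*1000)/10 = 400 (under the gasUsed cap), so gasToBurn = ((1500 - 1000) * 400) / 1000 = 200; under the old 0.3 slack the same message gave over = 1500 - (13*1000)/10 = 200 and burned only 100.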

@@ -419,7 +419,8 @@ var chainListCmd = &cli.Command{
}
tss = otss
for i, ts := range tss {
fmt.Printf("%d: %d blocks\n", ts.Height(), len(ts.Blocks()))
pbf := ts.Blocks()[0].ParentBaseFee
fmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit)))))
for _, b := range ts.Blocks() {
msgs, err := api.ChainGetBlockMessages(ctx, b.Cid())
@@ -445,7 +446,7 @@ var chainListCmd = &cli.Command{
avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs)))
}
fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPrice: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
fmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium)
}
if i < len(tss)-1 {
msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid())
@@ -1030,9 +1031,9 @@ var chainGasPriceCmd = &cli.Command{
nb := []int{1, 2, 3, 5, 10, 20, 50, 100, 300}
for _, nblocks := range nb {
addr := builtin.SystemActorAddr // TODO: make real when used in GasEsitmateGasPremium
addr := builtin.SystemActorAddr // TODO: make real when used in GasEstimateGasPremium
est, err := api.GasEsitmateGasPremium(ctx, uint64(nblocks), addr, 10000, types.EmptyTSK)
est, err := api.GasEstimateGasPremium(ctx, uint64(nblocks), addr, 10000, types.EmptyTSK)
if err != nil {
return err
}


@@ -205,7 +205,7 @@ type processTipSetApi interface {
StateMinerInitialPledgeCollateral(ctx context.Context, addr address.Address, precommitInfo miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error)
GasEsitmateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
WalletBalance(ctx context.Context, addr address.Address) (types.BigInt, error)
}
@@ -293,7 +293,7 @@ func ProcessTipset(ctx context.Context, api processTipSetApi, tipset *types.TipS
refundValue := refunds.GetRefund(maddr)
// We want to try and ensure these messages get mined quickly
gasPremium, err := api.GasEsitmateGasPremium(ctx, 0, wallet, 0, tipset.Key())
gasPremium, err := api.GasEstimateGasPremium(ctx, 0, wallet, 0, tipset.Key())
if err != nil {
log.Warnw("failed to estimate gas premium", "err", err)
continue


@@ -294,17 +294,17 @@ var storageFindCmd = &cli.Command{
Number: abi.SectorNumber(snum),
}
u, err := nodeApi.StorageFindSector(ctx, sid, stores.FTUnsealed, false)
u, err := nodeApi.StorageFindSector(ctx, sid, stores.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("finding unsealed: %w", err)
}
s, err := nodeApi.StorageFindSector(ctx, sid, stores.FTSealed, false)
s, err := nodeApi.StorageFindSector(ctx, sid, stores.FTSealed, 0, false)
if err != nil {
return xerrors.Errorf("finding sealed: %w", err)
}
c, err := nodeApi.StorageFindSector(ctx, sid, stores.FTCache, false)
c, err := nodeApi.StorageFindSector(ctx, sid, stores.FTCache, 0, false)
if err != nil {
return xerrors.Errorf("finding cache: %w", err)
}
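The CLI passes 0 for the new argument, meaning an unspecified seal proof type. A caller that knows the sector's proof type would pass it explicitly; a sketch with an assumed value:

s, err := nodeApi.StorageFindSector(ctx, sid, stores.FTSealed, abi.RegisteredSealProof_StackedDrg32GiBV1, false)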


@@ -0,0 +1,79 @@
version: 2.1
orbs:
go: gotest/tools@0.0.9
executors:
golang:
docker:
- image: circleci/golang:1.13
resource_class: 2xlarge
commands:
prepare-git-checkout:
steps:
- checkout
- run: git submodule sync
- run: git submodule update --init --recursive
install-build-dependencies:
steps:
- run: sudo apt-get update
- run: sudo apt-get install -y jq ocl-icd-opencl-dev
- run: ./extern/filecoin-ffi/install-filcrypto
download-groth-params-and-verifying-keys:
steps:
- restore_cache:
name: Restore parameters cache
keys:
- 'v26a-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: |
DIR=$(pwd)
cd $(mktemp -d)
go get github.com/filecoin-project/go-paramfetch/paramfetch
go build -o go-paramfetch github.com/filecoin-project/go-paramfetch/paramfetch
./go-paramfetch 2048 "${DIR}/parameters.json"
- save_cache:
name: Save parameters cache
key: 'v26a-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
jobs:
test:
executor: golang
environment:
RUST_LOG: info
steps:
- prepare-git-checkout
- install-build-dependencies
- download-groth-params-and-verifying-keys
- run: go test -v -timeout 10m ./...
mod-tidy-check:
executor: golang
steps:
- prepare-git-checkout
- go/mod-download
- go/mod-tidy-check
gofmt-check:
executor: golang
steps:
- prepare-git-checkout
- go/mod-download
- run: "! go fmt ./... 2>&1 | read"
lint-check:
executor: golang
steps:
- prepare-git-checkout
- install-build-dependencies
- go/mod-download
- go/install-golangci-lint:
gobin: $HOME/.local/bin
version: 1.23.8
- run:
command: $HOME/.local/bin/golangci-lint run -v --concurrency 2
workflows:
version: 2.1
build_and_test:
jobs:
- mod-tidy-check
- lint-check
- gofmt-check
- test

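Assuming the CircleCI CLI is installed, individual jobs in this config can be smoke-tested locally, e.g. `circleci local execute --job gofmt-check`.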
extern/sector-storage/.gitignore (new file, 2 lines)

@@ -0,0 +1,2 @@
.update-modules
.filecoin-build

extern/sector-storage/.gitmodules (new file, 4 lines)

@@ -0,0 +1,4 @@
[submodule "extern/filecoin-ffi"]
path = extern/filecoin-ffi
url = https://github.com/filecoin-project/filecoin-ffi.git
branch = master

extern/sector-storage/LICENSE-APACHE (new file, 5 lines)

@@ -0,0 +1,5 @@
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

extern/sector-storage/LICENSE-MIT (new file, 19 lines)

@@ -0,0 +1,19 @@
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

extern/sector-storage/Makefile (new file, 29 lines)

@@ -0,0 +1,29 @@
all: build
.PHONY: all
SUBMODULES=
FFI_PATH:=./extern/filecoin-ffi/
FFI_DEPS:=.install-filcrypto
FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS))
$(FFI_DEPS): .filecoin-build ;
.filecoin-build: $(FFI_PATH)
$(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%)
@touch $@
.update-modules:
git submodule update --init --recursive
@touch $@
test: .update-modules .filecoin-build
go test -v ./...
.PHONY: test
SUBMODULES+=test
build: $(SUBMODULES)
clean:
rm -f .filecoin-build
rm -f .update-modules

extern/sector-storage/README.md (new file, 61 lines)

@@ -0,0 +1,61 @@
# sector-storage
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![CircleCI](https://circleci.com/gh/filecoin-project/sector-storage.svg?style=svg)](https://circleci.com/gh/filecoin-project/sector-storage)
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
> a concrete implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface
The sector-storage project provides an implementation-nonspecific reference implementation of the [specs-storage](https://github.com/filecoin-project/specs-storage) interface.
## Disclaimer
Please report your issues with regards to sector-storage at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues)
## Architecture
![high-level architecture](docs/sector-storage.svg)
### `Manager`
The Manager is the top-level piece of the storage system, gluing all the other pieces
together. It also implements scheduling logic.
### `package stores`
This package implements the sector storage subsystem. Fundamentally, the storage
is divided into `path`s; each path has its own UUID and stores a set of sector
'files'. There are currently 3 types of sector files - `unsealed`, `sealed`,
and `cache`.
Paths can be shared between nodes by sharing the underlying filesystem.
### `stores.Local`
The Local store implements SectorProvider for paths mounted in the local
filesystem. Paths can be shared between nodes, and support shared filesystems
such as NFS.
stores.Local implements all native filesystem-related operations
### `stores.Remote`
The Remote store extends Local store, handles fetching sector files into a local
store if needed, and handles removing sectors from non-local stores.
### `stores.Index`
The Index is a singleton holding metadata about storage paths, and a mapping of
sector files to paths
### `LocalWorker`
LocalWorker implements the Worker interface with ffiwrapper.Sealer and a
store.Store instance
## License
The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms:
- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/sector-storage/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/sector-storage/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)

extern/sector-storage/docs/sector-storage.svg (new image, 75 KiB; file diff suppressed because one or more lines are too long)

extern/sector-storage/faults.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package sectorstorage
import (
"context"
"fmt"
"os"
"path/filepath"
"golang.org/x/xerrors"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/specs-actors/actors/abi"
)
// TODO: Track things more actively
type FaultTracker interface {
CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error)
}
// Returns unprovable sectors
func (m *Manager) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
var bad []abi.SectorID
ssize, err := spt.SectorSize()
if err != nil {
return nil, err
}
// TODO: More thorough checks
for _, sector := range sectors {
err := func() error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
locked, err := m.index.StorageTryLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTNone)
if err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
if !locked {
log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector, "sealed")
bad = append(bad, sector)
return nil
}
lp, _, err := m.localStore.AcquireSector(ctx, sector, spt, stores.FTSealed|stores.FTCache, stores.FTNone, stores.PathStorage, stores.AcquireMove)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: acquire sector in checkProvable", "sector", sector, "error", err)
bad = append(bad, sector)
return nil
}
if lp.Sealed == "" || lp.Cache == "" {
log.Warnw("CheckProvable Sector FAULT: cache an/or sealed paths not found", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache)
bad = append(bad, sector)
return nil
}
toCheck := map[string]int64{
lp.Sealed: 1,
filepath.Join(lp.Cache, "t_aux"): 0,
filepath.Join(lp.Cache, "p_aux"): 0,
}
addCachePathsForSectorSize(toCheck, lp.Cache, ssize)
for p, sz := range toCheck {
st, err := os.Stat(p)
if err != nil {
log.Warnw("CheckProvable Sector FAULT: sector file stat error", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "err", err)
bad = append(bad, sector)
return nil
}
if sz != 0 {
if st.Size() != int64(ssize)*sz {
log.Warnw("CheckProvable Sector FAULT: sector file is wrong size", "sector", sector, "sealed", lp.Sealed, "cache", lp.Cache, "file", p, "size", st.Size(), "expectSize", int64(ssize)*sz)
bad = append(bad, sector)
return nil
}
}
}
return nil
}()
if err != nil {
return nil, err
}
}
return bad, nil
}
func addCachePathsForSectorSize(chk map[string]int64, cacheDir string, ssize abi.SectorSize) {
switch ssize {
case 2 << 10:
fallthrough
case 8 << 20:
fallthrough
case 512 << 20:
chk[filepath.Join(cacheDir, "sc-02-data-tree-r-last.dat")] = 0
case 32 << 30:
for i := 0; i < 8; i++ {
chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0
}
case 64 << 30:
for i := 0; i < 16; i++ {
chk[filepath.Join(cacheDir, fmt.Sprintf("sc-02-data-tree-r-last-%d.dat", i))] = 0
}
default:
log.Warnf("not checking cache files of %s sectors for faults", ssize)
}
}
var _ FaultTracker = &Manager{}
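A hypothetical caller sketch for the FaultTracker interface, filtering unprovable sectors before proving (all names assumed, not part of this diff):

bad, err := m.CheckProvable(ctx, spt, sectors)
if err != nil {
    return xerrors.Errorf("checking provable sectors: %w", err)
}
badSet := make(map[abi.SectorID]struct{}, len(bad))
for _, id := range bad {
    badSet[id] = struct{}{} // skip these sectors when proving
}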


@@ -0,0 +1,86 @@
package basicfs
import (
"context"
"os"
"path/filepath"
"sync"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
type sectorFile struct {
abi.SectorID
stores.SectorFileType
}
type Provider struct {
Root string
lk sync.Mutex
waitSector map[sectorFile]chan struct{}
}
func (b *Provider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error) {
if err := os.Mkdir(filepath.Join(b.Root, stores.FTUnsealed.String()), 0755); err != nil && !os.IsExist(err) {
return stores.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, stores.FTSealed.String()), 0755); err != nil && !os.IsExist(err) {
return stores.SectorPaths{}, nil, err
}
if err := os.Mkdir(filepath.Join(b.Root, stores.FTCache.String()), 0755); err != nil && !os.IsExist(err) {
return stores.SectorPaths{}, nil, err
}
done := func() {}
out := stores.SectorPaths{
Id: id,
}
for _, fileType := range stores.PathTypes {
if !existing.Has(fileType) && !allocate.Has(fileType) {
continue
}
b.lk.Lock()
if b.waitSector == nil {
b.waitSector = map[sectorFile]chan struct{}{}
}
ch, found := b.waitSector[sectorFile{id, fileType}]
if !found {
ch = make(chan struct{}, 1)
b.waitSector[sectorFile{id, fileType}] = ch
}
b.lk.Unlock()
select {
case ch <- struct{}{}:
case <-ctx.Done():
done()
return stores.SectorPaths{}, nil, ctx.Err()
}
path := filepath.Join(b.Root, fileType.String(), stores.SectorName(id))
prevDone := done
done = func() {
prevDone()
<-ch
}
if !allocate.Has(fileType) {
if _, err := os.Stat(path); os.IsNotExist(err) {
done()
return stores.SectorPaths{}, nil, storiface.ErrSectorNotFound
}
}
stores.SetPathByType(&out, fileType, path)
}
return out, done, nil
}
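The buffered channel of capacity 1 allocated per (sector, fileType) pair above acts as a mutex: the send acquires it and the receive in done() releases it. In isolation:

ch := make(chan struct{}, 1)
ch <- struct{}{} // acquire (blocks if already held)
// ... use the sector path ...
<-ch // release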


@@ -0,0 +1,34 @@
package ffiwrapper
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type Config struct {
SealProofType abi.RegisteredSealProof
_ struct{} // guard against nameless init
}
func sizeFromConfig(cfg Config) (abi.SectorSize, error) {
return cfg.SealProofType.SectorSize()
}
func SealProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredSealProof, error) {
switch ssize {
case 2 << 10:
return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
case 8 << 20:
return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
case 512 << 20:
return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
case 32 << 30:
return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
case 64 << 30:
return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
}
}
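A sketch of deriving a sealer Config from a sector size with this helper (values assumed):

spt, err := SealProofTypeFromSectorSize(32 << 30) // 32 GiB
if err != nil {
    return err
}
cfg := Config{SealProofType: spt}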


@@ -0,0 +1,53 @@
package ffiwrapper
import (
"io"
"os"
"sync"
"golang.org/x/xerrors"
)
func ToReadableFile(r io.Reader, n int64) (*os.File, func() error, error) {
f, ok := r.(*os.File)
if ok {
return f, func() error { return nil }, nil
}
var w *os.File
f, w, err := os.Pipe()
if err != nil {
return nil, nil, err
}
var wait sync.Mutex
var werr error
wait.Lock()
go func() {
defer wait.Unlock()
var copied int64
copied, werr = io.CopyN(w, r, n)
if werr != nil {
log.Warnf("toReadableFile: copy error: %+v", werr)
}
err := w.Close()
if werr == nil && err != nil {
werr = err
log.Warnf("toReadableFile: close error: %+v", err)
return
}
if copied != n {
log.Warnf("copied different amount than expected: %d != %d", copied, n)
werr = xerrors.Errorf("copied different amount than expected: %d != %d", copied, n)
}
}()
return f, func() error {
wait.Lock()
return werr
}, nil
}
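A usage sketch for ToReadableFile, wrapping an in-memory reader for an API that requires an *os.File (`data` assumed):

f, wait, err := ToReadableFile(bytes.NewReader(data), int64(len(data)))
if err != nil {
    return err
}
// hand f to the consumer; it must read all len(data) bytes, since the
// background copier only finishes once the pipe is fully drained
if err := wait(); err != nil {
    return err
}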


@@ -0,0 +1,316 @@
package ffiwrapper
import (
"encoding/binary"
"io"
"os"
"syscall"
"github.com/detailyang/go-fallocate"
"golang.org/x/xerrors"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/filecoin-project/sector-storage/storiface"
)
const veryLargeRle = 1 << 20
// Sectors can be partially unsealed. We support this by appending a small
// trailer to each unsealed sector file containing an RLE+ marking which bytes
// in a sector are unsealed, and which are not (holes)
// unsealed sector files internally have this structure
// [unpadded (raw) data][rle+][4B LE length of the rle+ field]
type partialFile struct {
maxPiece abi.PaddedPieceSize
path string
allocated rlepluslazy.RLE
file *os.File
}
func writeTrailer(maxPieceSize int64, w *os.File, r rlepluslazy.RunIterator) error {
trailer, err := rlepluslazy.EncodeRuns(r, nil)
if err != nil {
return xerrors.Errorf("encoding trailer: %w", err)
}
// maxPieceSize == unpadded(sectorSize) == trailer start
if _, err := w.Seek(maxPieceSize, io.SeekStart); err != nil {
return xerrors.Errorf("seek to trailer start: %w", err)
}
rb, err := w.Write(trailer)
if err != nil {
return xerrors.Errorf("writing trailer data: %w", err)
}
if err := binary.Write(w, binary.LittleEndian, uint32(len(trailer))); err != nil {
return xerrors.Errorf("writing trailer length: %w", err)
}
return w.Truncate(maxPieceSize + int64(rb) + 4)
}
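// Resulting on-disk layout for an assumed 2 KiB sector (maxPieceSize = 2048):
//   bytes [0, 2048)         unpadded sector data
//   bytes [2048, 2048+rb)   RLE+ encoded allocation bitfield
//   final 4 bytes           little-endian uint32 length of the RLE+ field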
func createPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, xerrors.Errorf("openning partial file '%s': %w", path, err)
}
err = func() error {
err := fallocate.Fallocate(f, 0, int64(maxPieceSize))
if errno, ok := err.(syscall.Errno); ok {
if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS {
log.Warnf("could not allocated space, ignoring: %v", errno)
err = nil // log and ignore
}
}
if err != nil {
return xerrors.Errorf("fallocate '%s': %w", path, err)
}
if err := writeTrailer(int64(maxPieceSize), f, &rlepluslazy.RunSliceIterator{}); err != nil {
return xerrors.Errorf("writing trailer: %w", err)
}
return nil
}()
if err != nil {
f.Close()
return nil, err
}
if err := f.Close(); err != nil {
return nil, xerrors.Errorf("close empty partial file: %w", err)
}
return openPartialFile(maxPieceSize, path)
}
func openPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialFile, error) {
f, err := os.OpenFile(path, os.O_RDWR, 0644)
if err != nil {
return nil, xerrors.Errorf("openning partial file '%s': %w", path, err)
}
var rle rlepluslazy.RLE
err = func() error {
st, err := f.Stat()
if err != nil {
return xerrors.Errorf("stat '%s': %w", path, err)
}
if st.Size() < int64(maxPieceSize) {
return xerrors.Errorf("sector file '%s' was smaller than the sector size %d < %d", path, st.Size(), maxPieceSize)
}
// read trailer
var tlen [4]byte
_, err = f.ReadAt(tlen[:], st.Size()-int64(len(tlen)))
if err != nil {
return xerrors.Errorf("reading trailer length: %w", err)
}
// sanity-check the length
trailerLen := binary.LittleEndian.Uint32(tlen[:])
expectLen := int64(trailerLen) + int64(len(tlen)) + int64(maxPieceSize)
if expectLen != st.Size() {
return xerrors.Errorf("file '%d' has inconsistent length; has %d bytes; expected %d (%d trailer, %d sector data)", path, st.Size(), expectLen, int64(trailerLen)+int64(len(tlen)), maxPieceSize)
}
if trailerLen > veryLargeRle {
log.Warnf("Partial file '%s' has a VERY large trailer with %d bytes", path, trailerLen)
}
trailerStart := st.Size() - int64(len(tlen)) - int64(trailerLen)
if trailerStart != int64(maxPieceSize) {
return xerrors.Errorf("expected sector size to equal trailer start index")
}
trailerBytes := make([]byte, trailerLen)
_, err = f.ReadAt(trailerBytes, trailerStart)
if err != nil {
return xerrors.Errorf("reading trailer: %w", err)
}
rle, err = rlepluslazy.FromBuf(trailerBytes)
if err != nil {
return xerrors.Errorf("decoding trailer: %w", err)
}
it, err := rle.RunIterator()
if err != nil {
return xerrors.Errorf("getting trailer run iterator: %w", err)
}
f, err := rlepluslazy.Fill(it)
if err != nil {
return xerrors.Errorf("filling bitfield: %w", err)
}
lastSet, err := rlepluslazy.Count(f)
if err != nil {
return xerrors.Errorf("finding last set byte index: %w", err)
}
if lastSet > uint64(maxPieceSize) {
return xerrors.Errorf("last set byte at index higher than sector size: %d > %d", lastSet, maxPieceSize)
}
return nil
}()
if err != nil {
f.Close()
return nil, err
}
return &partialFile{
maxPiece: maxPieceSize,
path: path,
allocated: rle,
file: f,
}, nil
}
func (pf *partialFile) Close() error {
return pf.file.Close()
}
func (pf *partialFile) Writer(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (io.Writer, error) {
if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil {
return nil, xerrors.Errorf("seek piece start: %w", err)
}
{
have, err := pf.allocated.RunIterator()
if err != nil {
return nil, err
}
and, err := rlepluslazy.And(have, pieceRun(offset, size))
if err != nil {
return nil, err
}
c, err := rlepluslazy.Count(and)
if err != nil {
return nil, err
}
if c > 0 {
log.Warnf("getting partial file writer overwriting %d allocated bytes", c)
}
}
return pf.file, nil
}
func (pf *partialFile) MarkAllocated(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error {
have, err := pf.allocated.RunIterator()
if err != nil {
return err
}
ored, err := rlepluslazy.Or(have, pieceRun(offset, size))
if err != nil {
return err
}
if err := writeTrailer(int64(pf.maxPiece), pf.file, ored); err != nil {
return xerrors.Errorf("writing trailer: %w", err)
}
return nil
}
func (pf *partialFile) Free(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) error {
have, err := pf.allocated.RunIterator()
if err != nil {
return err
}
if err := fsutil.Deallocate(pf.file, int64(offset), int64(size)); err != nil {
return xerrors.Errorf("deallocating: %w", err)
}
s, err := rlepluslazy.Subtract(have, pieceRun(offset, size))
if err != nil {
return err
}
if err := writeTrailer(int64(pf.maxPiece), pf.file, s); err != nil {
return xerrors.Errorf("writing trailer: %w", err)
}
return nil
}
func (pf *partialFile) Reader(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) {
if _, err := pf.file.Seek(int64(offset), io.SeekStart); err != nil {
return nil, xerrors.Errorf("seek piece start: %w", err)
}
{
have, err := pf.allocated.RunIterator()
if err != nil {
return nil, err
}
and, err := rlepluslazy.And(have, pieceRun(offset, size))
if err != nil {
return nil, err
}
c, err := rlepluslazy.Count(and)
if err != nil {
return nil, err
}
if c != uint64(size) {
log.Warnf("getting partial file reader reading %d unallocated bytes", uint64(size)-c)
}
}
return pf.file, nil
}
func (pf *partialFile) Allocated() (rlepluslazy.RunIterator, error) {
return pf.allocated.RunIterator()
}
func (pf *partialFile) HasAllocated(offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
have, err := pf.Allocated()
if err != nil {
return false, err
}
u, err := rlepluslazy.And(have, pieceRun(offset.Padded(), size.Padded()))
if err != nil {
return false, err
}
uc, err := rlepluslazy.Count(u)
if err != nil {
return false, err
}
return abi.PaddedPieceSize(uc) == size.Padded(), nil
}
func pieceRun(offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) rlepluslazy.RunIterator {
var runs []rlepluslazy.Run
if offset > 0 {
runs = append(runs, rlepluslazy.Run{
Val: false,
Len: uint64(offset),
})
}
runs = append(runs, rlepluslazy.Run{
Val: true,
Len: uint64(size),
})
return &rlepluslazy.RunSliceIterator{Runs: runs}
}


@@ -0,0 +1,28 @@
package ffiwrapper
import (
"github.com/filecoin-project/specs-actors/actors/abi"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("ffiwrapper")
type Sealer struct {
sealProofType abi.RegisteredSealProof
ssize abi.SectorSize // a function of sealProofType and postProofType
sectors SectorProvider
stopping chan struct{}
}
func (sb *Sealer) Stop() {
close(sb.stopping)
}
func (sb *Sealer) SectorSize() abi.SectorSize {
return sb.ssize
}
func (sb *Sealer) SealProofType() abi.RegisteredSealProof {
return sb.sealProofType
}


@@ -0,0 +1,673 @@
//+build cgo
package ffiwrapper
import (
"bufio"
"bytes"
"context"
"io"
"math/bits"
"os"
"runtime"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/fr32"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/sector-storage/zerocomm"
)
var _ Storage = &Sealer{}
func New(sectors SectorProvider, cfg *Config) (*Sealer, error) {
sectorSize, err := sizeFromConfig(*cfg)
if err != nil {
return nil, err
}
sb := &Sealer{
sealProofType: cfg.SealProofType,
ssize: sectorSize,
sectors: sectors,
stopping: make(chan struct{}),
}
return sb, nil
}
func (sb *Sealer) NewSector(ctx context.Context, sector abi.SectorID) error {
// TODO: Allocate the sector here instead of in addpiece
return nil
}
func (sb *Sealer) AddPiece(ctx context.Context, sector abi.SectorID, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
var offset abi.UnpaddedPieceSize
for _, size := range existingPieceSizes {
offset += size
}
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
if offset.Padded()+pieceSize.Padded() > maxPieceSize {
return abi.PieceInfo{}, xerrors.Errorf("can't add %d byte piece to sector %v with %d bytes of existing pieces", pieceSize, sector, offset)
}
var err error
var done func()
var stagedFile *partialFile
defer func() {
if done != nil {
done()
}
if stagedFile != nil {
if err := stagedFile.Close(); err != nil {
log.Errorf("closing staged file: %+v", err)
}
}
}()
var stagedPath stores.SectorPaths
if len(existingPieceSizes) == 0 {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, 0, stores.FTUnsealed, stores.PathSealing)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
}
stagedFile, err = createPartialFile(maxPieceSize, stagedPath.Unsealed)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("creating unsealed sector file: %w", err)
}
} else {
stagedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathSealing)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquire unsealed sector: %w", err)
}
stagedFile, err = openPartialFile(maxPieceSize, stagedPath.Unsealed)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("opening unsealed sector file: %w", err)
}
}
w, err := stagedFile.Writer(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded())
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("getting partial file writer: %w", err)
}
pw := fr32.NewPadWriter(w)
pr := io.TeeReader(io.LimitReader(file, int64(pieceSize)), pw)
chunk := abi.PaddedPieceSize(4 << 20)
buf := make([]byte, chunk.Unpadded())
var pieceCids []abi.PieceInfo
for {
var read int
for rbuf := buf; len(rbuf) > 0; {
n, err := pr.Read(rbuf)
if err != nil && err != io.EOF {
return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err)
}
rbuf = rbuf[n:]
read += n
if err == io.EOF {
break
}
}
if read == 0 {
break
}
c, err := sb.pieceCid(buf[:read])
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("pieceCid error: %w", err)
}
pieceCids = append(pieceCids, abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(buf[:read])).Padded(),
PieceCID: c,
})
}
if err := pw.Close(); err != nil {
return abi.PieceInfo{}, xerrors.Errorf("closing padded writer: %w", err)
}
if err := stagedFile.MarkAllocated(storiface.UnpaddedByteIndex(offset).Padded(), pieceSize.Padded()); err != nil {
return abi.PieceInfo{}, xerrors.Errorf("marking data range as allocated: %w", err)
}
if err := stagedFile.Close(); err != nil {
return abi.PieceInfo{}, err
}
stagedFile = nil
if len(pieceCids) == 1 {
return pieceCids[0], nil
}
pieceCID, err := ffi.GenerateUnsealedCID(sb.sealProofType, pieceCids)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
}
// validate that the pieceCID was properly formed
if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil {
return abi.PieceInfo{}, err
}
return abi.PieceInfo{
Size: pieceSize.Padded(),
PieceCID: pieceCID,
}, nil
}
func (sb *Sealer) pieceCid(in []byte) (cid.Cid, error) {
prf, werr, err := ToReadableFile(bytes.NewReader(in), int64(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("getting tee reader pipe: %w", err)
}
pieceCID, err := ffi.GeneratePieceCIDFromFile(sb.sealProofType, prf, abi.UnpaddedPieceSize(len(in)))
if err != nil {
return cid.Undef, xerrors.Errorf("generating piece commitment: %w", err)
}
prf.Close()
return pieceCID, werr()
}
func (sb *Sealer) UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
// try finding existing
unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
var pf *partialFile
switch {
case xerrors.Is(err, storiface.ErrSectorNotFound):
unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, stores.FTNone, stores.FTUnsealed, stores.PathStorage)
if err != nil {
return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err)
}
defer done()
pf, err = createPartialFile(maxPieceSize, unsealedPath.Unsealed)
if err != nil {
return xerrors.Errorf("create unsealed file: %w", err)
}
case err == nil:
defer done()
pf, err = openPartialFile(maxPieceSize, unsealedPath.Unsealed)
if err != nil {
return xerrors.Errorf("opening partial file: %w", err)
}
default:
return xerrors.Errorf("acquire unsealed sector path (existing): %w", err)
}
defer pf.Close()
allocated, err := pf.Allocated()
if err != nil {
return xerrors.Errorf("getting bitruns of allocated data: %w", err)
}
toUnseal, err := computeUnsealRanges(allocated, offset, size)
if err != nil {
return xerrors.Errorf("computing unseal ranges: %w", err)
}
if !toUnseal.HasNext() {
return nil
}
srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache|stores.FTSealed, stores.FTNone, stores.PathStorage)
if err != nil {
return xerrors.Errorf("acquire sealed sector paths: %w", err)
}
defer srcDone()
sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644)
if err != nil {
return xerrors.Errorf("opening sealed file: %w", err)
}
defer sealed.Close()
var at, nextat abi.PaddedPieceSize
first := true
for first || toUnseal.HasNext() {
first = false
piece, err := toUnseal.NextRun()
if err != nil {
return xerrors.Errorf("getting next range to unseal: %w", err)
}
at = nextat
nextat += abi.PaddedPieceSize(piece.Len)
if !piece.Val {
continue
}
out, err := pf.Writer(offset.Padded(), size.Padded())
if err != nil {
return xerrors.Errorf("getting partial file writer: %w", err)
}
// <eww>
opr, opw, err := os.Pipe()
if err != nil {
return xerrors.Errorf("creating out pipe: %w", err)
}
var perr error
outWait := make(chan struct{})
{
go func() {
defer close(outWait)
defer opr.Close()
padwriter := fr32.NewPadWriter(out)
if err != nil {
perr = xerrors.Errorf("creating new padded writer: %w", err)
return
}
bsize := uint64(size.Padded())
if bsize > uint64(runtime.NumCPU())*fr32.MTTresh {
bsize = uint64(runtime.NumCPU()) * fr32.MTTresh
}
bw := bufio.NewWriterSize(padwriter, int(abi.PaddedPieceSize(bsize).Unpadded()))
_, err = io.CopyN(bw, opr, int64(size))
if err != nil {
perr = xerrors.Errorf("copying data: %w", err)
return
}
if err := bw.Flush(); err != nil {
perr = xerrors.Errorf("flushing unpadded data: %w", err)
return
}
if err := padwriter.Close(); err != nil {
perr = xerrors.Errorf("closing padwriter: %w", err)
return
}
}()
}
// </eww>
// TODO: This may be possible to do in parallel
err = ffi.UnsealRange(sb.sealProofType,
srcPaths.Cache,
sealed,
opw,
sector.Number,
sector.Miner,
randomness,
commd,
uint64(at.Unpadded()),
uint64(abi.PaddedPieceSize(piece.Len).Unpadded()))
_ = opw.Close()
if err != nil {
return xerrors.Errorf("unseal range: %w", err)
}
select {
case <-outWait:
case <-ctx.Done():
return ctx.Err()
}
if perr != nil {
return xerrors.Errorf("piping output to unsealed file: %w", perr)
}
if err := pf.MarkAllocated(storiface.PaddedByteIndex(at), abi.PaddedPieceSize(piece.Len)); err != nil {
return xerrors.Errorf("marking unsealed range as allocated: %w", err)
}
if !toUnseal.HasNext() {
break
}
}
return nil
}
func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
path, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
if err != nil {
return false, xerrors.Errorf("acquire unsealed sector path: %w", err)
}
defer done()
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
pf, err := openPartialFile(maxPieceSize, path.Unsealed)
if xerrors.Is(err, os.ErrNotExist) {
return false, xerrors.Errorf("opening partial file: %w", err)
}
ok, err := pf.HasAllocated(offset, size)
if err != nil {
pf.Close()
return false, err
}
if !ok {
pf.Close()
return false, nil
}
f, err := pf.Reader(offset.Padded(), size.Padded())
if err != nil {
pf.Close()
return false, xerrors.Errorf("getting partial file reader: %w", err)
}
upr, err := fr32.NewUnpadReader(f, size.Padded())
if err != nil {
return false, xerrors.Errorf("creating unpadded reader: %w", err)
}
if _, err := io.CopyN(writer, upr, int64(size)); err != nil {
pf.Close()
return false, xerrors.Errorf("reading unsealed file: %w", err)
}
if err := pf.Close(); err != nil {
return false, xerrors.Errorf("closing partial file: %w", err)
}
return false, nil
}
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache, stores.PathSealing)
if err != nil {
return nil, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer done()
e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, xerrors.Errorf("ensuring sealed file exists: %w", err)
}
if err := e.Close(); err != nil {
return nil, err
}
if err := os.Mkdir(paths.Cache, 0755); err != nil {
if os.IsExist(err) {
log.Warnf("existing cache in %s; removing", paths.Cache)
if err := os.RemoveAll(paths.Cache); err != nil {
return nil, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err)
}
if err := os.Mkdir(paths.Cache, 0755); err != nil {
return nil, xerrors.Errorf("mkdir cache path after cleanup: %w", err)
}
} else {
return nil, err
}
}
var sum abi.UnpaddedPieceSize
for _, piece := range pieces {
sum += piece.Size.Unpadded()
}
ussize := abi.PaddedPieceSize(sb.ssize).Unpadded()
if sum != ussize {
return nil, xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
}
// TODO: context cancellation respect
p1o, err := ffi.SealPreCommitPhase1(
sb.sealProofType,
paths.Cache,
paths.Unsealed,
paths.Sealed,
sector.Number,
sector.Miner,
ticket,
pieces,
)
if err != nil {
return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
}
return p1o, nil
}
func (sb *Sealer) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector paths: %w", err)
}
defer done()
sealedCID, unsealedCID, err := ffi.SealPreCommitPhase2(phase1Out, paths.Cache, paths.Sealed)
if err != nil {
return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.Number, paths.Unsealed, err)
}
return storage.SectorCids{
Unsealed: unsealedCID,
Sealed: sealedCID,
}, nil
}
func (sb *Sealer) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTSealed|stores.FTCache, 0, stores.PathSealing)
if err != nil {
return nil, xerrors.Errorf("acquire sector paths: %w", err)
}
defer done()
output, err := ffi.SealCommitPhase1(
sb.sealProofType,
cids.Sealed,
cids.Unsealed,
paths.Cache,
paths.Sealed,
sector.Number,
sector.Miner,
ticket,
seed,
pieces,
)
if err != nil {
log.Warn("StandaloneSealCommit error: ", err)
log.Warnf("num:%d tkt:%v seed:%v, pi:%v sealedCID:%v, unsealedCID:%v", sector.Number, ticket, seed, pieces, cids.Sealed, cids.Unsealed)
return nil, xerrors.Errorf("StandaloneSealCommit: %w", err)
}
return output, nil
}
func (sb *Sealer) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (storage.Proof, error) {
return ffi.SealCommitPhase2(phase1Out, sector.Number, sector.Miner)
}
func (sb *Sealer) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
if len(keepUnsealed) > 0 {
maxPieceSize := abi.PaddedPieceSize(sb.ssize)
sr := pieceRun(0, maxPieceSize)
for _, s := range keepUnsealed {
si := &rlepluslazy.RunSliceIterator{}
if s.Offset != 0 {
si.Runs = append(si.Runs, rlepluslazy.Run{Val: false, Len: uint64(s.Offset)})
}
si.Runs = append(si.Runs, rlepluslazy.Run{Val: true, Len: uint64(s.Size)})
var err error
sr, err = rlepluslazy.Subtract(sr, si)
if err != nil {
return err
}
}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTUnsealed, 0, stores.PathStorage)
if err != nil {
return xerrors.Errorf("acquiring sector cache path: %w", err)
}
defer done()
pf, err := openPartialFile(maxPieceSize, paths.Unsealed)
if xerrors.Is(err, os.ErrNotExist) {
return xerrors.Errorf("opening partial file: %w", err)
}
var at uint64
for sr.HasNext() {
r, err := sr.NextRun()
if err != nil {
_ = pf.Close()
return err
}
offset := at
at += r.Len
if !r.Val {
continue
}
err = pf.Free(storiface.PaddedByteIndex(abi.UnpaddedPieceSize(offset).Padded()), abi.UnpaddedPieceSize(r.Len).Padded())
if err != nil {
_ = pf.Close()
return xerrors.Errorf("free partial file range: %w", err)
}
}
if err := pf.Close(); err != nil {
return err
}
}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, stores.FTCache, 0, stores.PathStorage)
if err != nil {
return xerrors.Errorf("acquiring sector cache path: %w", err)
}
defer done()
return ffi.ClearCache(uint64(sb.ssize), paths.Cache)
}
func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
// This call is meant to mark storage as 'freeable'. Given that unsealing is
// very expensive, we don't remove data as soon as we can - instead we only
// do that when we don't have free space for data that really needs it
// This function should not be called at this layer, everything should be
// handled in localworker
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) Remove(ctx context.Context, sector abi.SectorID) error {
return xerrors.Errorf("not supported at this layer") // happens in localworker
}
func GeneratePieceCIDFromFile(proofType abi.RegisteredSealProof, piece io.Reader, pieceSize abi.UnpaddedPieceSize) (cid.Cid, error) {
f, werr, err := ToReadableFile(piece, int64(pieceSize))
if err != nil {
return cid.Undef, err
}
pieceCID, err := ffi.GeneratePieceCIDFromFile(proofType, f, pieceSize)
if err != nil {
return cid.Undef, err
}
return pieceCID, werr()
}
func GetRequiredPadding(oldLength abi.PaddedPieceSize, newPieceLength abi.PaddedPieceSize) ([]abi.PaddedPieceSize, abi.PaddedPieceSize) {
padPieces := make([]abi.PaddedPieceSize, 0)
toFill := uint64(-oldLength % newPieceLength)
n := bits.OnesCount64(toFill)
var sum abi.PaddedPieceSize
for i := 0; i < n; i++ {
next := bits.TrailingZeros64(toFill)
psize := uint64(1) << uint(next)
toFill ^= psize
padded := abi.PaddedPieceSize(psize)
padPieces = append(padPieces, padded)
sum += padded
}
return padPieces, sum
}
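// exampleRequiredPadding is an editor's sketch, not part of the original diff:
// after 1024+256 = 1280 bytes of pieces, aligning a 2048-byte piece needs 768
// bytes of zero padding, emitted smallest set bit first.
func exampleRequiredPadding() {
pads, sum := GetRequiredPadding(abi.PaddedPieceSize(1280), abi.PaddedPieceSize(2048))
_ = pads // [256, 512]: one zero piece per set bit of the 768-byte gap
_ = sum  // 768: the next piece then starts at offset 2048, a multiple of its size
}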
func GenerateUnsealedCID(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
ssize, err := proofType.SectorSize()
if err != nil {
return cid.Undef, err
}
pssize := abi.PaddedPieceSize(ssize)
allPieces := make([]abi.PieceInfo, 0, len(pieces))
if len(pieces) == 0 {
allPieces = append(allPieces, abi.PieceInfo{
Size: pssize,
PieceCID: zerocomm.ZeroPieceCommitment(pssize.Unpadded()),
})
} else {
var sum abi.PaddedPieceSize
padTo := func(pads []abi.PaddedPieceSize) {
for _, p := range pads {
allPieces = append(allPieces, abi.PieceInfo{
Size: p,
PieceCID: zerocomm.ZeroPieceCommitment(p.Unpadded()),
})
sum += p
}
}
for _, p := range pieces {
ps, _ := GetRequiredPadding(sum, p.Size)
padTo(ps)
allPieces = append(allPieces, p)
sum += p.Size
}
ps, _ := GetRequiredPadding(sum, pssize)
padTo(ps)
}
return ffi.GenerateUnsealedCID(proofType, allPieces)
}

View File

@ -0,0 +1,596 @@
package ffiwrapper
import (
"bytes"
"context"
"fmt"
"github.com/ipfs/go-cid"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
logging "github.com/ipfs/go-log"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
paramfetch "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/sector-storage/stores"
)
func init() {
logging.SetLogLevel("*", "DEBUG") //nolint: errcheck
}
var sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1
var sectorSize, _ = sealProofType.SectorSize()
var sealRand = abi.SealRandomness{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2}
type seal struct {
id abi.SectorID
cids storage.SectorCids
pi abi.PieceInfo
ticket abi.SealRandomness
}
func data(sn abi.SectorNumber, dlen abi.UnpaddedPieceSize) io.Reader {
return io.MultiReader(
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(123)),
io.LimitReader(rand.New(rand.NewSource(42+int64(sn))), int64(dlen-123)),
)
}
func (s *seal) precommit(t *testing.T, sb *Sealer, id abi.SectorID, done func()) {
defer done()
dlen := abi.PaddedPieceSize(sectorSize).Unpadded()
var err error
r := data(id.Number, dlen)
s.pi, err = sb.AddPiece(context.TODO(), id, []abi.UnpaddedPieceSize{}, dlen, r)
if err != nil {
t.Fatalf("%+v", err)
}
s.ticket = sealRand
p1, err := sb.SealPreCommit1(context.TODO(), id, s.ticket, []abi.PieceInfo{s.pi})
if err != nil {
t.Fatalf("%+v", err)
}
cids, err := sb.SealPreCommit2(context.TODO(), id, p1)
if err != nil {
t.Fatalf("%+v", err)
}
s.cids = cids
}
func (s *seal) commit(t *testing.T, sb *Sealer, done func()) {
defer done()
seed := abi.InteractiveSealRandomness{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9}
pc1, err := sb.SealCommit1(context.TODO(), s.id, s.ticket, seed, []abi.PieceInfo{s.pi}, s.cids)
if err != nil {
t.Fatalf("%+v", err)
}
proof, err := sb.SealCommit2(context.TODO(), s.id, pc1)
if err != nil {
t.Fatalf("%+v", err)
}
ok, err := ProofVerifier.VerifySeal(abi.SealVerifyInfo{
SectorID: s.id,
SealedCID: s.cids.Sealed,
SealProof: sealProofType,
Proof: proof,
Randomness: s.ticket,
InteractiveRandomness: seed,
UnsealedCID: s.cids.Unsealed,
})
if err != nil {
t.Fatalf("%+v", err)
}
if !ok {
t.Fatal("proof failed to validate")
}
}
func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si abi.SectorID, done func()) {
defer done()
var b bytes.Buffer
_, err := sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ := ioutil.ReadAll(data(si.Number, 1016))
if !bytes.Equal(b.Bytes(), expect) {
t.Fatal("read wrong bytes")
}
p, sd, err := sp.AcquireSector(context.TODO(), si, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
if err != nil {
t.Fatal(err)
}
if err := os.Remove(p.Unsealed); err != nil {
t.Fatal(err)
}
sd()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err == nil {
t.Fatal("HOW?!")
}
log.Info("this is what we expect: ", err)
if err := sb.UnsealPiece(context.TODO(), si, 0, 1016, sealRand, s.cids.Unsealed); err != nil {
t.Fatal(err)
}
b.Reset()
_, err = sb.ReadPiece(context.TODO(), &b, si, 0, 1016)
if err != nil {
t.Fatal(err)
}
expect, _ = ioutil.ReadAll(data(si.Number, 1016))
require.Equal(t, expect, b.Bytes())
b.Reset()
have, err := sb.ReadPiece(context.TODO(), &b, si, 0, 2032)
if err != nil {
t.Fatal(err)
}
if have {
t.Errorf("didn't expect to read things")
}
if b.Len() != 0 {
t.Fatal("read bytes")
}
}
func post(t *testing.T, sealer *Sealer, seals ...seal) time.Time {
/*randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7}
sis := make([]abi.SectorInfo, len(seals))
for i, s := range seals {
sis[i] = abi.SectorInfo{
RegisteredProof: sealProofType,
SectorNumber: s.id.Number,
SealedCID: s.cids.Sealed,
}
}
candidates, err := sealer.GenerateEPostCandidates(context.TODO(), seals[0].id.Miner, sis, randomness, []abi.SectorNumber{})
if err != nil {
t.Fatalf("%+v", err)
}*/
fmt.Println("skipping post")
genCandidates := time.Now()
/*if len(candidates) != 1 {
t.Fatal("expected 1 candidate")
}
candidatesPrime := make([]abi.PoStCandidate, len(candidates))
for idx := range candidatesPrime {
candidatesPrime[idx] = candidates[idx].Candidate
}
proofs, err := sealer.ComputeElectionPoSt(context.TODO(), seals[0].id.Miner, sis, randomness, candidatesPrime)
if err != nil {
t.Fatalf("%+v", err)
}
ePoStChallengeCount := ElectionPostChallengeCount(uint64(len(sis)), 0)
ok, err := ProofVerifier.VerifyElectionPost(context.TODO(), abi.PoStVerifyInfo{
Randomness: randomness,
Candidates: candidatesPrime,
Proofs: proofs,
EligibleSectors: sis,
Prover: seals[0].id.Miner,
ChallengeCount: ePoStChallengeCount,
})
if err != nil {
t.Fatalf("%+v", err)
}
if !ok {
t.Fatal("bad post")
}
*/
return genCandidates
}
func getGrothParamFileAndVerifyingKeys(s abi.SectorSize) {
dat, err := ioutil.ReadFile("../parameters.json")
if err != nil {
panic(err)
}
err = paramfetch.GetParams(dat, uint64(s))
if err != nil {
panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
}
}
// TestDownloadParams exists only so that developers and CI can pre-download
// Groth parameters and verifying keys before running the tests which rely on
// those parameters and keys. To do this, run the following command:
//
// go test -run=^TestDownloadParams
//
func TestDownloadParams(t *testing.T) {
defer requireFDsClosed(t, openFDs(t))
getGrothParamFileAndVerifyingKeys(sectorSize)
}
func TestSealAndVerify(t *testing.T) {
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
cdir, err := ioutil.TempDir("", "sbtest-c-")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
cfg := &Config{
SealProofType: sealProofType,
}
sp := &basicfs.Provider{
Root: cdir,
}
sb, err := New(sp, cfg)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", cdir)
return
}
if err := os.RemoveAll(cdir); err != nil {
t.Error(err)
}
}
defer cleanup()
si := abi.SectorID{Miner: miner, Number: 1}
s := seal{id: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
s.commit(t, sb, func() {})
commit := time.Now()
genCandidates := post(t, sb, s)
epost := time.Now()
post(t, sb, s)
if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil {
t.Fatalf("%+v", err)
}
s.unseal(t, sb, sp, si, func() {})
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("Commit: %s\n", commit.Sub(precommit).String())
fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(commit).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String())
}
func TestSealPoStNoCommit(t *testing.T) {
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "info")
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
cfg := &Config{
SealProofType: sealProofType,
}
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp, cfg)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if t.Failed() {
fmt.Printf("not removing %s\n", dir)
return
}
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}
defer cleanup()
si := abi.SectorID{Miner: miner, Number: 1}
s := seal{id: si}
start := time.Now()
s.precommit(t, sb, si, func() {})
precommit := time.Now()
if err := sb.FinalizeSector(context.TODO(), si, nil); err != nil {
t.Fatal(err)
}
genCandidates := post(t, sb, s)
epost := time.Now()
fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String())
fmt.Printf("GenCandidates: %s\n", genCandidiates.Sub(precommit).String())
fmt.Printf("EPoSt: %s\n", epost.Sub(genCandidiates).String())
}
func TestSealAndVerify2(t *testing.T) {
defer requireFDsClosed(t, openFDs(t))
if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware
t.Skip("this is slow")
}
_ = os.Setenv("RUST_LOG", "trace")
getGrothParamFileAndVerifyingKeys(sectorSize)
dir, err := ioutil.TempDir("", "sbtest")
if err != nil {
t.Fatal(err)
}
miner := abi.ActorID(123)
cfg := &Config{
SealProofType: sealProofType,
}
sp := &basicfs.Provider{
Root: dir,
}
sb, err := New(sp, cfg)
if err != nil {
t.Fatalf("%+v", err)
}
cleanup := func() {
if err := os.RemoveAll(dir); err != nil {
t.Error(err)
}
}
defer cleanup()
var wg sync.WaitGroup
si1 := abi.SectorID{Miner: miner, Number: 1}
si2 := abi.SectorID{Miner: miner, Number: 2}
s1 := seal{id: si1}
s2 := seal{id: si2}
wg.Add(2)
go s1.precommit(t, sb, si1, wg.Done) //nolint: staticcheck
time.Sleep(100 * time.Millisecond)
go s2.precommit(t, sb, si2, wg.Done) //nolint: staticcheck
wg.Wait()
wg.Add(2)
go s1.commit(t, sb, wg.Done) //nolint: staticcheck
go s2.commit(t, sb, wg.Done) //nolint: staticcheck
wg.Wait()
post(t, sb, s1, s2)
}
func BenchmarkWriteWithAlignment(b *testing.B) {
bt := abi.UnpaddedPieceSize(2 * 127 * 1024 * 1024)
b.SetBytes(int64(bt))
for i := 0; i < b.N; i++ {
b.StopTimer()
rf, w, _ := ToReadableFile(bytes.NewReader(bytes.Repeat([]byte{0xff, 0}, int(bt/2))), int64(bt))
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
b.StartTimer()
ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg2KiBV1, rf, bt, tf, nil)
w()
}
}
func openFDs(t *testing.T) int {
dent, err := ioutil.ReadDir("/proc/self/fd")
require.NoError(t, err)
var skip int
for _, info := range dent {
l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
if err != nil {
continue
}
if strings.HasPrefix(l, "/dev/nvidia") {
skip++
}
if strings.HasPrefix(l, "/var/tmp/filecoin-proof-parameters/") {
skip++
}
}
return len(dent) - skip
}
func requireFDsClosed(t *testing.T, start int) {
openNow := openFDs(t)
if start != openNow {
dent, err := ioutil.ReadDir("/proc/self/fd")
require.NoError(t, err)
for _, info := range dent {
l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
if err != nil {
fmt.Printf("FD err %s\n", err)
continue
}
fmt.Printf("FD %s -> %s\n", info.Name(), l)
}
}
log.Infow("open FDs", "start", start, "now", openNow)
require.Equal(t, start, openNow, "FDs shouldn't leak")
}
func TestGenerateUnsealedCID(t *testing.T) {
pt := abi.RegisteredSealProof_StackedDrg2KiBV1
ups := int(abi.PaddedPieceSize(2048).Unpadded())
commP := func(b []byte) cid.Cid {
pf, werr, err := ToReadableFile(bytes.NewReader(b), int64(len(b)))
require.NoError(t, err)
c, err := ffi.GeneratePieceCIDFromFile(pt, pf, abi.UnpaddedPieceSize(len(b)))
require.NoError(t, err)
require.NoError(t, werr())
return c
}
testCommEq := func(name string, in [][]byte, expect [][]byte) {
t.Run(name, func(t *testing.T) {
upi := make([]abi.PieceInfo, len(in))
for i, b := range in {
upi[i] = abi.PieceInfo{
Size: abi.UnpaddedPieceSize(len(b)).Padded(),
PieceCID: commP(b),
}
}
sectorPi := []abi.PieceInfo{
{
Size: 2048,
PieceCID: commP(bytes.Join(expect, nil)),
},
}
expectCid, err := GenerateUnsealedCID(pt, sectorPi)
require.NoError(t, err)
actualCid, err := GenerateUnsealedCID(pt, upi)
require.NoError(t, err)
require.Equal(t, expectCid, actualCid)
})
}
barr := func(b byte, den int) []byte {
return bytes.Repeat([]byte{b}, ups/den)
}
// 0000
testCommEq("zero",
nil,
[][]byte{barr(0, 1)},
)
// 1111
testCommEq("one",
[][]byte{barr(1, 1)},
[][]byte{barr(1, 1)},
)
// 11 00
testCommEq("one|2",
[][]byte{barr(1, 2)},
[][]byte{barr(1, 2), barr(0, 2)},
)
// 1 0 00
testCommEq("one|4",
[][]byte{barr(1, 4)},
[][]byte{barr(1, 4), barr(0, 4), barr(0, 2)},
)
// 11 2 0
testCommEq("one|2-two|4",
[][]byte{barr(1, 2), barr(2, 4)},
[][]byte{barr(1, 2), barr(2, 4), barr(0, 4)},
)
// 1 0 22
testCommEq("one|4-two|2",
[][]byte{barr(1, 4), barr(2, 2)},
[][]byte{barr(1, 4), barr(0, 4), barr(2, 2)},
)
// 1 0 22 0000
testCommEq("one|8-two|4",
[][]byte{barr(1, 8), barr(2, 4)},
[][]byte{barr(1, 8), barr(0, 8), barr(2, 4), barr(0, 2)},
)
// 11 2 0 0000
testCommEq("one|4-two|8",
[][]byte{barr(1, 4), barr(2, 8)},
[][]byte{barr(1, 4), barr(2, 8), barr(0, 8), barr(0, 2)},
)
// 1 0 22 3 0 00 4444 5 0 00
testCommEq("one|16-two|8-three|16-four|4-five|16",
[][]byte{barr(1, 16), barr(2, 8), barr(3, 16), barr(4, 4), barr(5, 16)},
[][]byte{barr(1, 16), barr(0, 16), barr(2, 8), barr(3, 16), barr(0, 16), barr(0, 8), barr(4, 4), barr(5, 16), barr(0, 16), barr(0, 8)},
)
}

View File

@ -0,0 +1,49 @@
package ffiwrapper
import (
"context"
"io"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/ffiwrapper/basicfs"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
type Validator interface {
CanCommit(sector stores.SectorPaths) (bool, error)
CanProve(sector stores.SectorPaths) (bool, error)
}
type StorageSealer interface {
storage.Sealer
storage.Storage
}
type Storage interface {
storage.Prover
StorageSealer
UnsealPiece(ctx context.Context, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error
ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
}
type Verifier interface {
VerifySeal(abi.SealVerifyInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error)
GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error)
}
type SectorProvider interface {
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, ptype stores.PathType) (stores.SectorPaths, func(), error)
}
var _ SectorProvider = &basicfs.Provider{}
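// exampleAcquire is an editor's sketch of the AcquireSector contract described
// above (hypothetical helper, not part of the original diff).
func exampleAcquire(ctx context.Context, sp SectorProvider, id abi.SectorID) error {
paths, done, err := sp.AcquireSector(ctx, id, stores.FTUnsealed, stores.FTNone, stores.PathStorage)
if err != nil {
return err // storiface.ErrSectorNotFound if the unsealed file doesn't exist
}
defer done() // release the path reservation when finished with it
_ = paths.Unsealed
return nil
}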

View File

@ -0,0 +1,26 @@
package ffiwrapper
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/storiface"
)
// merge gaps between ranges which are close to each other
// TODO: more benchmarking to come up with a more optimal number
const mergeGaps = 32 << 20
// TODO const expandRuns = 16 << 20 // unseal more than requested for future requests
func computeUnsealRanges(unsealed rlepluslazy.RunIterator, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (rlepluslazy.RunIterator, error) {
todo := pieceRun(offset.Padded(), size.Padded())
todo, err := rlepluslazy.Subtract(todo, unsealed)
if err != nil {
return nil, xerrors.Errorf("compute todo-unsealed: %w", err)
}
return rlepluslazy.JoinClose(todo, mergeGaps)
}
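// exampleUnsealRanges is an editor's sketch, not part of the original diff
// (it assumes pieceRun, the package-local helper used above): with [0, 1MiB)
// already unsealed and a request for the first 4MiB, only the padded
// remainder comes back, and holes narrower than mergeGaps get coalesced.
func exampleUnsealRanges() (rlepluslazy.RunIterator, error) {
unsealed := pieceRun(0, abi.PaddedPieceSize(1<<20))
size := abi.PaddedPieceSize(4 << 20).Unpadded()
return computeUnsealRanges(unsealed, 0, size) // runs covering [1MiB, 4MiB)
}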

View File

@ -0,0 +1,119 @@
//+build cgo

package ffiwrapper
import (
"context"
"golang.org/x/xerrors"
"go.opencensus.io/trace"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/stores"
)
func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
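// Editor's note: clearing the top two bits keeps the 32-byte randomness below
// the BLS12-381 scalar field modulus before it crosses the FFI boundary.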
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS?
if err != nil {
return nil, err
}
defer done()
if len(skipped) > 0 {
return nil, xerrors.Errorf("pubSectorToPriv skipped sectors: %+v", skipped)
}
return ffi.GenerateWinningPoSt(minerID, privsectors, randomness)
}
func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
randomness[31] &= 0x3f
privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof)
if err != nil {
return nil, nil, xerrors.Errorf("gathering sector info: %w", err)
}
defer done()
proof, err := ffi.GenerateWindowPoSt(minerID, privsectors, randomness)
return proof, skipped, err
}
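// pubSectorToPriv resolves public sector infos into private proving inputs:
// declared faults are skipped, sealed+cache paths are acquired for the rest
// (sectors whose paths can't be acquired go into skipped instead of failing
// the whole PoSt), and rpt maps each seal proof type to its PoSt proof type.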
func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []abi.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) {
fmap := map[abi.SectorNumber]struct{}{}
for _, fault := range faults {
fmap[fault] = struct{}{}
}
var doneFuncs []func()
done := func() {
for _, df := range doneFuncs {
df()
}
}
var skipped []abi.SectorID
var out []ffi.PrivateSectorInfo
for _, s := range sectorInfo {
if _, faulty := fmap[s.SectorNumber]; faulty {
continue
}
sid := abi.SectorID{Miner: mid, Number: s.SectorNumber}
paths, d, err := sb.sectors.AcquireSector(ctx, sid, stores.FTCache|stores.FTSealed, 0, stores.PathStorage)
if err != nil {
log.Warnw("failed to acquire sector, skipping", "sector", sid, "error", err)
skipped = append(skipped, sid)
continue
}
doneFuncs = append(doneFuncs, d)
postProofType, err := rpt(s.SealProof)
if err != nil {
done()
return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err)
}
out = append(out, ffi.PrivateSectorInfo{
CacheDirPath: paths.Cache,
PoStProofType: postProofType,
SealedSectorPath: paths.Sealed,
SectorInfo: s,
})
}
return ffi.NewSortedPrivateSectorInfo(out...), skipped, done, nil
}
var _ Verifier = ProofVerifier
type proofVerifier struct{}
var ProofVerifier = proofVerifier{}
func (proofVerifier) VerifySeal(info abi.SealVerifyInfo) (bool, error) {
return ffi.VerifySeal(info)
}
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")
defer span.End()
return ffi.VerifyWinningPoSt(info)
}
func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWindowPoSt")
defer span.End()
return ffi.VerifyWindowPoSt(info)
}
func (proofVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
randomness[31] &= 0x3f
return ffi.GenerateWinningPoStSectorChallenge(proofType, minerID, randomness, eligibleSectorCount)
}

157
extern/sector-storage/fr32/fr32.go vendored Normal file
View File

@ -0,0 +1,157 @@
package fr32
import (
"math/bits"
"runtime"
"sync"
"github.com/filecoin-project/specs-actors/actors/abi"
)
var MTTresh = uint64(32 << 20)
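// mtChunkCount decides how many chunks a Pad/Unpad of usz bytes is split
// into: roughly one per MTTresh (32MiB), rounded to a power of two near
// NumCPU when the input is large (keeping every chunk 127/128-byte aligned),
// and capped at 32 to avoid oversized buffers.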
func mtChunkCount(usz abi.PaddedPieceSize) uint64 {
threads := (uint64(usz)) / MTTresh
if threads > uint64(runtime.NumCPU()) {
threads = 1 << (bits.Len32(uint32(runtime.NumCPU())))
}
if threads == 0 {
return 1
}
if threads > 32 {
return 32 // avoid too large buffers
}
return threads
}
func mt(in, out []byte, padLen int, op func(unpadded, padded []byte)) {
threads := mtChunkCount(abi.PaddedPieceSize(padLen))
threadBytes := abi.PaddedPieceSize(padLen / int(threads))
var wg sync.WaitGroup
wg.Add(int(threads))
for i := 0; i < int(threads); i++ {
go func(thread int) {
defer wg.Done()
start := threadBytes * abi.PaddedPieceSize(thread)
end := start + threadBytes
op(in[start.Unpadded():end.Unpadded()], out[start:end])
}(i)
}
wg.Wait()
}
// Assumes len(in)%127==0 and len(out)%128==0
func Pad(in, out []byte) {
if len(out) > int(MTTresh) {
mt(in, out, len(out), pad)
return
}
pad(in, out)
}
func pad(in, out []byte) {
chunks := len(out) / 128
for chunk := 0; chunk < chunks; chunk++ {
inOff := chunk * 127
outOff := chunk * 128
copy(out[outOff:outOff+31], in[inOff:inOff+31])
t := in[inOff+31] >> 6
out[outOff+31] = in[inOff+31] & 0x3f
var v byte
for i := 32; i < 64; i++ {
v = in[inOff+i]
out[outOff+i] = (v << 2) | t
t = v >> 6
}
t = v >> 4
out[outOff+63] &= 0x3f
for i := 64; i < 96; i++ {
v = in[inOff+i]
out[outOff+i] = (v << 4) | t
t = v >> 4
}
t = v >> 2
out[outOff+95] &= 0x3f
for i := 96; i < 127; i++ {
v = in[inOff+i]
out[outOff+i] = (v << 6) | t
t = v >> 2
}
out[outOff+127] = t & 0x3f
}
}
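// examplePadChunk is an editor's sketch, not part of the original diff: each
// 127-byte chunk becomes 128 bytes holding four 32-byte field elements, each
// with its top two bits zeroed (4 x (254 data bits + 2 zero bits)).
func examplePadChunk() {
var in [127]byte
for i := range in {
in[i] = 0xff
}
var out [128]byte
Pad(in[:], out[:])
_ = out[31] // 0x3f: bytes 31, 63, 95 and 127 all have their high 2 bits cleared
}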
// Assumes len(in)%128==0 and len(out)%127==0
func Unpad(in []byte, out []byte) {
if len(in) > int(MTTresh) {
mt(out, in, len(in), unpad)
return
}
unpad(out, in)
}
func unpad(out, in []byte) {
chunks := len(in) / 128
for chunk := 0; chunk < chunks; chunk++ {
inOffNext := chunk*128 + 1
outOff := chunk * 127
at := in[chunk*128]
for i := 0; i < 32; i++ {
next := in[i+inOffNext]
out[outOff+i] = at
//out[i] |= next << 8
at = next
}
out[outOff+31] |= at << 6
for i := 32; i < 64; i++ {
next := in[i+inOffNext]
out[outOff+i] = at >> 2
out[outOff+i] |= next << 6
at = next
}
out[outOff+63] ^= (at << 6) ^ (at << 4)
for i := 64; i < 96; i++ {
next := in[i+inOffNext]
out[outOff+i] = at >> 4
out[outOff+i] |= next << 4
at = next
}
out[outOff+95] ^= (at << 4) ^ (at << 2)
for i := 96; i < 127; i++ {
next := in[i+inOffNext]
out[outOff+i] = at >> 6
out[outOff+i] |= next << 2
at = next
}
}
}

View File

@ -0,0 +1,66 @@
package fr32_test
import (
"bytes"
"github.com/filecoin-project/sector-storage/fr32"
"io"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/ffiwrapper"
)
func TestWriteTwoPcs(t *testing.T) {
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
paddedSize := abi.PaddedPieceSize(16 << 20)
n := 2
var rawBytes []byte
for i := 0; i < n; i++ {
buf := bytes.Repeat([]byte{0xab * byte(i)}, int(paddedSize.Unpadded()))
rawBytes = append(rawBytes, buf...)
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)
if err != nil {
panic(err)
}
if err := w(); err != nil {
panic(err)
}
}
if _, err := tf.Seek(0, io.SeekStart); err != nil {
panic(err)
}
ffiBytes, err := ioutil.ReadAll(tf)
if err != nil {
panic(err)
}
if err := tf.Close(); err != nil {
panic(err)
}
if err := os.Remove(tf.Name()); err != nil {
panic(err)
}
outBytes := make([]byte, int(paddedSize)*n)
fr32.Pad(rawBytes, outBytes)
require.Equal(t, ffiBytes, outBytes)
unpadBytes := make([]byte, int(paddedSize.Unpadded())*n)
fr32.Unpad(ffiBytes, unpadBytes)
require.Equal(t, rawBytes, unpadBytes)
}

250
extern/sector-storage/fr32/fr32_test.go vendored Normal file
View File

@ -0,0 +1,250 @@
package fr32_test
import (
"bytes"
"io"
"io/ioutil"
"math/rand"
"os"
"testing"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/fr32"
)
func padFFI(buf []byte) []byte {
rf, w, _ := ffiwrapper.ToReadableFile(bytes.NewReader(buf), int64(len(buf)))
tf, _ := ioutil.TempFile("/tmp/", "scrb-")
_, _, _, err := ffi.WriteWithAlignment(abi.RegisteredSealProof_StackedDrg32GiBV1, rf, abi.UnpaddedPieceSize(len(buf)), tf, nil)
if err != nil {
panic(err)
}
if err := w(); err != nil {
panic(err)
}
if _, err := tf.Seek(0, io.SeekStart); err != nil {
panic(err)
}
padded, err := ioutil.ReadAll(tf)
if err != nil {
panic(err)
}
if err := tf.Close(); err != nil {
panic(err)
}
if err := os.Remove(tf.Name()); err != nil {
panic(err)
}
return padded
}
func TestPadChunkFFI(t *testing.T) {
testByteChunk := func(b byte) func(*testing.T) {
return func(t *testing.T) {
var buf [128]byte
copy(buf[:], bytes.Repeat([]byte{b}, 127))
fr32.Pad(buf[:], buf[:])
expect := padFFI(bytes.Repeat([]byte{b}, 127))
require.Equal(t, expect, buf[:])
}
}
t.Run("ones", testByteChunk(0xff))
t.Run("lsb1", testByteChunk(0x01))
t.Run("msb1", testByteChunk(0x80))
t.Run("zero", testByteChunk(0x0))
t.Run("mid", testByteChunk(0x3c))
}
func TestPadChunkRandEqFFI(t *testing.T) {
for i := 0; i < 200; i++ {
var input [127]byte
rand.Read(input[:])
var buf [128]byte
fr32.Pad(input[:], buf[:])
expect := padFFI(input[:])
require.Equal(t, expect, buf[:])
}
}
func TestRoundtrip(t *testing.T) {
testByteChunk := func(b byte) func(*testing.T) {
return func(t *testing.T) {
var buf [128]byte
input := bytes.Repeat([]byte{b}, 127)
fr32.Pad(input, buf[:])
var out [127]byte
fr32.Unpad(buf[:], out[:])
require.Equal(t, input, out[:])
}
}
t.Run("ones", testByteChunk(0xff))
t.Run("lsb1", testByteChunk(0x01))
t.Run("msb1", testByteChunk(0x80))
t.Run("zero", testByteChunk(0x0))
t.Run("mid", testByteChunk(0x3c))
}
func TestRoundtripChunkRand(t *testing.T) {
for i := 0; i < 200; i++ {
var input [127]byte
rand.Read(input[:])
var buf [128]byte
copy(buf[:], input[:])
fr32.Pad(buf[:], buf[:])
var out [127]byte
fr32.Unpad(buf[:], out[:])
require.Equal(t, input[:], out[:])
}
}
func TestRoundtrip16MRand(t *testing.T) {
up := abi.PaddedPieceSize(16 << 20).Unpadded()
input := make([]byte, up)
rand.Read(input[:])
buf := make([]byte, 16<<20)
fr32.Pad(input, buf)
out := make([]byte, up)
fr32.Unpad(buf, out)
require.Equal(t, input, out)
ffi := padFFI(input)
require.Equal(t, ffi, buf)
}
func BenchmarkPadChunk(b *testing.B) {
var buf [128]byte
in := bytes.Repeat([]byte{0xff}, 127)
b.SetBytes(127)
for i := 0; i < b.N; i++ {
fr32.Pad(in, buf[:])
}
}
func BenchmarkChunkRoundtrip(b *testing.B) {
var buf [128]byte
copy(buf[:], bytes.Repeat([]byte{0xff}, 127))
var out [127]byte
b.SetBytes(127)
for i := 0; i < b.N; i++ {
fr32.Pad(buf[:], buf[:])
fr32.Unpad(buf[:], out[:])
}
}
func BenchmarkUnpadChunk(b *testing.B) {
var buf [128]byte
copy(buf[:], bytes.Repeat([]byte{0xff}, 127))
fr32.Pad(buf[:], buf[:])
var out [127]byte
b.SetBytes(127)
b.ReportAllocs()
bs := buf[:]
for i := 0; i < b.N; i++ {
fr32.Unpad(bs, out[:])
}
}
func BenchmarkUnpad16MChunk(b *testing.B) {
up := abi.PaddedPieceSize(16 << 20).Unpadded()
var buf [16 << 20]byte
fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:])
var out [16 << 20]byte
b.SetBytes(16 << 20)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
fr32.Unpad(buf[:], out[:])
}
}
func BenchmarkPad16MChunk(b *testing.B) {
up := abi.PaddedPieceSize(16 << 20).Unpadded()
var buf [16 << 20]byte
in := bytes.Repeat([]byte{0xff}, int(up))
b.SetBytes(16 << 20)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
fr32.Pad(in, buf[:])
}
}
func BenchmarkPad1GChunk(b *testing.B) {
up := abi.PaddedPieceSize(1 << 30).Unpadded()
var buf [1 << 30]byte
in := bytes.Repeat([]byte{0xff}, int(up))
b.SetBytes(1 << 30)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
fr32.Pad(in, buf[:])
}
}
func BenchmarkUnpad1GChunk(b *testing.B) {
up := abi.PaddedPieceSize(1 << 30).Unpadded()
var buf [1 << 30]byte
fr32.Pad(bytes.Repeat([]byte{0xff}, int(up)), buf[:])
var out [1 << 30]byte
b.SetBytes(1 << 30)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
fr32.Unpad(buf[:], out[:])
}
}

133
extern/sector-storage/fr32/readers.go vendored Normal file
View File

@ -0,0 +1,133 @@
package fr32
import (
"io"
"math/bits"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type unpadReader struct {
src io.Reader
left uint64
work []byte
}
func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) {
if err := sz.Validate(); err != nil {
return nil, xerrors.Errorf("bad piece size: %w", err)
}
buf := make([]byte, MTTresh*mtChunkCount(sz))
return &unpadReader{
src: src,
left: uint64(sz),
work: buf,
}, nil
}
func (r *unpadReader) Read(out []byte) (int, error) {
if r.left == 0 {
return 0, io.EOF
}
chunks := len(out) / 127
if chunks == 0 {
return 0, xerrors.Errorf("output buffer too small: %d bytes, need at least 127", len(out))
}
outTwoPow := 1 << (63 - bits.LeadingZeros64(uint64(chunks*128)))
if err := abi.PaddedPieceSize(outTwoPow).Validate(); err != nil {
return 0, xerrors.Errorf("output must be of valid padded piece size: %w", err)
}
todo := abi.PaddedPieceSize(outTwoPow)
if r.left < uint64(todo) {
todo = abi.PaddedPieceSize(1 << (63 - bits.LeadingZeros64(r.left)))
}
r.left -= uint64(todo)
n, err := io.ReadFull(r.src, r.work[:todo])
if err != nil && err != io.EOF {
return n, err
}
if n != int(todo) {
return 0, xerrors.Errorf("didn't read enough: read %d of %d bytes: %w", n, todo, err)
}
Unpad(r.work[:todo], out[:todo.Unpadded()])
return int(todo.Unpadded()), err
}
type padWriter struct {
dst io.Writer
stash []byte
work []byte
}
func NewPadWriter(dst io.Writer) io.WriteCloser {
return &padWriter{
dst: dst,
}
}
func (w *padWriter) Write(p []byte) (int, error) {
in := p
if len(p)+len(w.stash) < 127 {
w.stash = append(w.stash, p...)
return len(p), nil
}
if len(w.stash) != 0 {
in = append(w.stash, in...)
}
for {
pieces := subPieces(abi.UnpaddedPieceSize(len(in)))
biggest := pieces[len(pieces)-1]
if abi.PaddedPieceSize(cap(w.work)) < biggest.Padded() {
w.work = make([]byte, 0, biggest.Padded())
}
Pad(in[:int(biggest)], w.work[:int(biggest.Padded())])
n, err := w.dst.Write(w.work[:int(biggest.Padded())])
if err != nil {
return int(abi.PaddedPieceSize(n).Unpadded()), err
}
in = in[biggest:]
if len(in) < 127 {
if cap(w.stash) < len(in) {
w.stash = make([]byte, 0, len(in))
}
w.stash = w.stash[:len(in)]
copy(w.stash, in)
return len(p), nil
}
}
}
func (w *padWriter) Close() error {
if len(w.stash) > 0 {
return xerrors.Errorf("still have %d unprocessed bytes", len(w.stash))
}
// allow gc
w.stash = nil
w.work = nil
w.dst = nil
return nil
}
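// exampleRoundtrip is an editor's sketch, not part of the original diff: it
// streams whole 127-byte chunks through NewPadWriter into dst, then reads
// them back through NewUnpadReader; payload should be 127*8 = 1016 bytes so
// Close has no stashed partial chunk to complain about.
func exampleRoundtrip(dst io.ReadWriter, payload []byte) ([]byte, error) {
w := NewPadWriter(dst)
if _, err := w.Write(payload); err != nil {
return nil, err
}
if err := w.Close(); err != nil { // errors if a partial chunk is stashed
return nil, err
}
r, err := NewUnpadReader(dst, abi.PaddedPieceSize(128*8))
if err != nil {
return nil, err
}
out := make([]byte, 127*8)
_, err = io.ReadFull(r, out) // out should equal the original payload
return out, err
}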

View File

@ -0,0 +1,34 @@
package fr32_test
import (
"bytes"
"io/ioutil"
"testing"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/fr32"
)
func TestUnpadReader(t *testing.T) {
ps := abi.PaddedPieceSize(64 << 20).Unpadded()
raw := bytes.Repeat([]byte{0x77}, int(ps))
padOut := make([]byte, ps.Padded())
fr32.Pad(raw, padOut)
r, err := fr32.NewUnpadReader(bytes.NewReader(padOut), ps.Padded())
if err != nil {
t.Fatal(err)
}
readered, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
require.Equal(t, raw, readered)
}

31
extern/sector-storage/fr32/utils.go vendored Normal file
View File

@ -0,0 +1,31 @@
package fr32
import (
"math/bits"
"github.com/filecoin-project/specs-actors/actors/abi"
)
func subPieces(in abi.UnpaddedPieceSize) []abi.UnpaddedPieceSize {
// Convert to in-sector bytes for easier math:
//
// (we convert to sector bytes as they are nice round binary numbers)
w := uint64(in.Padded())
out := make([]abi.UnpaddedPieceSize, bits.OnesCount64(w))
for i := range out {
// Extract the next lowest non-zero bit
next := bits.TrailingZeros64(w)
psize := uint64(1) << next
// e.g: if the number is 0b010100, psize will be 0b000100
// set that bit to 0 by XORing it, so the next iteration looks at the
// next bit
w ^= psize
// Add the piece size to the list of pieces we need to create
out[i] = abi.PaddedPieceSize(psize).Unpadded()
}
return out
}
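// exampleSubPieces is an editor's sketch, not part of the original diff:
// 381 unpadded bytes occupy 384 = 256+128 padded bytes, so the decomposition
// is a 127-byte and a 254-byte piece, smallest first.
func exampleSubPieces() {
pieces := subPieces(abi.UnpaddedPieceSize(381))
_ = pieces // [127, 254]
}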

View File

@ -0,0 +1,28 @@
package fsutil
import (
"os"
"syscall"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("fsutil")
const FallocFlPunchHole = 0x02 // linux/falloc.h
func Deallocate(file *os.File, offset int64, length int64) error {
if length == 0 {
return nil
}
err := syscall.Fallocate(int(file.Fd()), FallocFlPunchHole, offset, length)
if errno, ok := err.(syscall.Errno); ok {
if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS {
log.Warnf("could not deallocate space, ignoring: %v", errno)
err = nil // log and ignore
}
}
return err
}

View File

@ -0,0 +1,17 @@
// +build !linux

package fsutil
import (
"os"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("fsutil")
func Deallocate(file *os.File, offset int64, length int64) error {
log.Warnf("deallocating space not supported")
return nil
}

View File

@ -0,0 +1,29 @@
package fsutil
import (
"os"
"syscall"
"golang.org/x/xerrors"
)
type SizeInfo struct {
OnDisk int64
}
// FileSize returns bytes used by a file on disk
func FileSize(path string) (SizeInfo, error) {
var stat syscall.Stat_t
if err := syscall.Stat(path, &stat); err != nil {
if err == syscall.ENOENT {
return SizeInfo{}, os.ErrNotExist
}
return SizeInfo{}, xerrors.Errorf("stat: %w", err)
}
// NOTE: stat.Blocks is in 512B blocks, NOT in stat.Blksize
// See https://www.gnu.org/software/libc/manual/html_node/Attribute-Meanings.html
return SizeInfo{
int64(stat.Blocks) * 512, // NOTE: int64 cast is needed on osx
}, nil
}
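// exampleSparse is an editor's sketch, not part of the original diff, tying
// Deallocate and FileSize together on a punch-hole-capable Linux filesystem.
func exampleSparse(f *os.File) (SizeInfo, error) {
if _, err := f.Write(make([]byte, 1<<20)); err != nil { // allocate 1MiB
return SizeInfo{}, err
}
if err := Deallocate(f, 0, 512<<10); err != nil { // punch out the first half
return SizeInfo{}, err
}
if err := f.Sync(); err != nil {
return SizeInfo{}, err
}
return FileSize(f.Name()) // OnDisk now roughly half the apparent size
}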

View File

@ -0,0 +1,7 @@
package fsutil
type FsStat struct {
Capacity int64
Available int64 // Available to use for sector storage
Reserved int64
}

View File

@ -0,0 +1,19 @@
package fsutil
import (
"syscall"
"golang.org/x/xerrors"
)
func Statfs(path string) (FsStat, error) {
var stat syscall.Statfs_t
if err := syscall.Statfs(path, &stat); err != nil {
return FsStat{}, xerrors.Errorf("statfs: %w", err)
}
return FsStat{
Capacity: int64(stat.Blocks) * int64(stat.Bsize),
Available: int64(stat.Bavail) * int64(stat.Bsize),
}, nil
}

View File

@ -0,0 +1,28 @@
package fsutil
import (
"syscall"
"unsafe"
)
func Statfs(volumePath string) (FsStat, error) {
// From https://github.com/ricochet2200/go-disk-usage/blob/master/du/diskusage_windows.go
h := syscall.MustLoadDLL("kernel32.dll")
c := h.MustFindProc("GetDiskFreeSpaceExW")
var freeBytes int64
var totalBytes int64
var availBytes int64
c.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))),
uintptr(unsafe.Pointer(&freeBytes)),
uintptr(unsafe.Pointer(&totalBytes)),
uintptr(unsafe.Pointer(&availBytes)))
return FsStat{
Capacity: totalBytes,
Available: availBytes,
}, nil
}

32
extern/sector-storage/go.mod vendored Normal file
View File

@ -0,0 +1,32 @@
module github.com/filecoin-project/sector-storage
go 1.13
require (
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
github.com/elastic/go-sysinfo v1.3.0
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d
github.com/filecoin-project/go-bitfield v0.1.2
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663
github.com/filecoin-project/specs-actors v0.8.2
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea
github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.4
github.com/hashicorp/go-multierror v1.0.0
github.com/ipfs/go-cid v0.0.6
github.com/ipfs/go-ipfs-files v0.0.7
github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 // indirect
github.com/ipfs/go-log v1.0.4
github.com/ipfs/go-log/v2 v2.0.5
github.com/mattn/go-isatty v0.0.9 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/stretchr/testify v1.6.1
go.opencensus.io v0.22.3
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 // indirect
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d // indirect
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 // indirect
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
honnef.co/go/tools v0.0.1-2020.1.3 // indirect
)

364
extern/sector-storage/go.sum vendored Normal file
View File

@ -0,0 +1,364 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg=
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU=
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk=
github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE=
github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY=
github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8=
github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E=
github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0=
github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg=
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw=
github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY=
github.com/filecoin-project/go-bitfield v0.1.2 h1:TjLregCoyP1/5lm7WCM0axyV1myIHwbjGa21skuu5tk=
github.com/filecoin-project/go-bitfield v0.1.2/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663 h1:eYxi6vI5CyeXD15X1bB3bledDXbqKxqf0wQzTLgwYwA=
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc=
github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y=
github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY=
github.com/filecoin-project/specs-actors v0.8.2 h1:fpAPOPqWqmzJCWHpm6P1XDRSpQrxyY5Pzh5H3doYs7Q=
github.com/filecoin-project/specs-actors v0.8.2/go.mod h1:Q3ACV5kBLvqPaYbthc/J1lGMJ5OwogmD9pzdtPRMdCw=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea h1:iixjULRQFPn7Q9KlIqfwLJnlAXO10bbkI+xy5GKGdLY=
github.com/filecoin-project/specs-storage v0.1.1-0.20200622113353-88a9704877ea/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk=
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=
github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY=
github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM=
github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU=
github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog=
github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs=
github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE=
github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk=
github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0=
github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs=
github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc=
github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA=
github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4=
github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669 h1:jIVle1vGSzxyUhseYNEqd7qcDVRrIbJ7UxGwao70cF0=
github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4=
github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80=
github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms=
github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA=
github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY=
github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs=
github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU=
github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c=
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4=
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=
github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U=
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc=
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I=
github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg=
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI=
github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M=
github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA=
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc=
github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=
github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY=
github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw=
github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI=
github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg=
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I=
github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6 h1:TjszyFsQsyZNHwdVdZ5m7bjmreu0znc2kRYsEml9/Ww=
golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae h1:QoJmnb9uyPCrH8GIg9uRLn4Ta45yhcQtpymCd0AavO8=
golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d h1:62ap6LNOjDU6uGmKXHJbSfciMoV+FeI1sRXx/pLDL44=
golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566 h1:OXjomkWHhzUx4+HldlJ2TsMxJdWgEo5CTtspD1wdhdk=
golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8=
gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=

extern/sector-storage/localworker.go vendored Normal file

@ -0,0 +1,298 @@
package sectorstorage
import (
"context"
"io"
"os"
"runtime"
"github.com/elastic/go-sysinfo"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/specs-actors/actors/abi"
storage2 "github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
var pathTypes = []stores.SectorFileType{stores.FTUnsealed, stores.FTSealed, stores.FTCache}
type WorkerConfig struct {
SealProof abi.RegisteredSealProof
TaskTypes []sealtasks.TaskType
}
type LocalWorker struct {
scfg *ffiwrapper.Config
storage stores.Store
localStore *stores.Local
sindex stores.SectorIndex
acceptTasks map[sealtasks.TaskType]struct{}
}
func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex) *LocalWorker {
acceptTasks := map[sealtasks.TaskType]struct{}{}
for _, taskType := range wcfg.TaskTypes {
acceptTasks[taskType] = struct{}{}
}
return &LocalWorker{
scfg: &ffiwrapper.Config{
SealProofType: wcfg.SealProof,
},
storage: store,
localStore: local,
sindex: sindex,
acceptTasks: acceptTasks,
}
}
type localWorkerPathProvider struct {
w *LocalWorker
op stores.AcquireMode
}
func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
paths, storageIDs, err := l.w.storage.AcquireSector(ctx, sector, l.w.scfg.SealProofType, existing, allocate, sealing, l.op)
if err != nil {
return stores.SectorPaths{}, nil, err
}
releaseStorage, err := l.w.localStore.Reserve(ctx, sector, l.w.scfg.SealProofType, allocate, storageIDs, stores.FSOverheadSeal)
if err != nil {
return stores.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err)
}
log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, paths)
return paths, func() {
releaseStorage()
for _, fileType := range pathTypes {
if fileType&allocate == 0 {
continue
}
sid := stores.PathByType(storageIDs, fileType)
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector, fileType, l.op == stores.AcquireMove); err != nil {
log.Errorf("declare sector error: %+v", err)
}
}
}, nil
}
func (l *LocalWorker) sb() (ffiwrapper.Storage, error) {
return ffiwrapper.New(&localWorkerPathProvider{w: l}, l.scfg)
}
func (l *LocalWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
sb, err := l.sb()
if err != nil {
return err
}
return sb.NewSector(ctx, sector)
}
func (l *LocalWorker) AddPiece(ctx context.Context, sector abi.SectorID, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
sb, err := l.sb()
if err != nil {
return abi.PieceInfo{}, err
}
return sb.AddPiece(ctx, sector, epcs, sz, r)
}
func (l *LocalWorker) Fetch(ctx context.Context, sector abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
_, done, err := (&localWorkerPathProvider{w: l, op: am}).AcquireSector(ctx, sector, fileType, stores.FTNone, ptype)
if err != nil {
return err
}
done()
return nil
}
func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage2.PreCommit1Out, err error) {
{
// cleanup previous failed attempts if they exist
if err := l.storage.Remove(ctx, sector, stores.FTSealed, true); err != nil {
return nil, xerrors.Errorf("cleaning up sealed data: %w", err)
}
if err := l.storage.Remove(ctx, sector, stores.FTCache, true); err != nil {
return nil, xerrors.Errorf("cleaning up cache data: %w", err)
}
}
sb, err := l.sb()
if err != nil {
return nil, err
}
return sb.SealPreCommit1(ctx, sector, ticket, pieces)
}
func (l *LocalWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.PreCommit1Out) (cids storage2.SectorCids, err error) {
sb, err := l.sb()
if err != nil {
return storage2.SectorCids{}, err
}
return sb.SealPreCommit2(ctx, sector, phase1Out)
}
func (l *LocalWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage2.SectorCids) (output storage2.Commit1Out, err error) {
sb, err := l.sb()
if err != nil {
return nil, err
}
return sb.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}
func (l *LocalWorker) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage2.Commit1Out) (proof storage2.Proof, err error) {
sb, err := l.sb()
if err != nil {
return nil, err
}
return sb.SealCommit2(ctx, sector, phase1Out)
}
func (l *LocalWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage2.Range) error {
sb, err := l.sb()
if err != nil {
return err
}
if err := sb.FinalizeSector(ctx, sector, keepUnsealed); err != nil {
return xerrors.Errorf("finalizing sector: %w", err)
}
if len(keepUnsealed) == 0 {
if err := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); err != nil {
return xerrors.Errorf("removing unsealed data: %w", err)
}
}
return nil
}
func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage2.Range) error {
return xerrors.Errorf("implement me")
}
func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error {
var err error
if rerr := l.storage.Remove(ctx, sector, stores.FTSealed, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr))
}
if rerr := l.storage.Remove(ctx, sector, stores.FTCache, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr))
}
if rerr := l.storage.Remove(ctx, sector, stores.FTUnsealed, true); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
}
return err
}
func (l *LocalWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
if err := l.storage.MoveStorage(ctx, sector, l.scfg.SealProofType, stores.FTSealed|stores.FTCache); err != nil {
return xerrors.Errorf("moving sealed data to storage: %w", err)
}
return nil
}
func (l *LocalWorker) UnsealPiece(ctx context.Context, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
sb, err := l.sb()
if err != nil {
return err
}
if err := sb.UnsealPiece(ctx, sector, index, size, randomness, cid); err != nil {
return xerrors.Errorf("unsealing sector: %w", err)
}
if err := l.storage.RemoveCopies(ctx, sector, stores.FTSealed); err != nil {
return xerrors.Errorf("removing source data: %w", err)
}
if err := l.storage.RemoveCopies(ctx, sector, stores.FTCache); err != nil {
return xerrors.Errorf("removing source data: %w", err)
}
return nil
}
func (l *LocalWorker) ReadPiece(ctx context.Context, writer io.Writer, sector abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
sb, err := l.sb()
if err != nil {
return false, err
}
return sb.ReadPiece(ctx, writer, sector, index, size)
}
func (l *LocalWorker) TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) {
return l.acceptTasks, nil
}
func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return l.localStore.Local(ctx)
}
func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) {
hostname, err := os.Hostname() // TODO: allow overriding from config
if err != nil {
panic(err)
}
gpus, err := ffi.GetGPUDevices()
if err != nil {
log.Errorf("getting gpu devices failed: %+v", err)
}
h, err := sysinfo.Host()
if err != nil {
return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err)
}
mem, err := h.Memory()
if err != nil {
return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err)
}
return storiface.WorkerInfo{
Hostname: hostname,
Resources: storiface.WorkerResources{
MemPhysical: mem.Total,
MemSwap: mem.VirtualTotal,
MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process
CPUs: uint64(runtime.NumCPU()),
GPUs: gpus,
},
}, nil
}
func (l *LocalWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil
}
func (l *LocalWorker) Close() error {
return nil
}
var _ Worker = &LocalWorker{}
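Illustrative sketch (not part of this commit) of wiring up a LocalWorker and querying its info. It assumes it compiles inside this sectorstorage package, and that the store, local store and index arguments stand in for objects constructed elsewhere (New in manager.go does exactly this wiring):

package sectorstorage

import (
	"context"
	"fmt"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/sector-storage/sealtasks"
	"github.com/filecoin-project/sector-storage/stores"
)

// exampleLocalWorker is a hypothetical helper; rstor, lstor and idx are
// assumed to be set up by the caller.
func exampleLocalWorker(rstor stores.Store, lstor *stores.Local, idx stores.SectorIndex) error {
	w := NewLocalWorker(WorkerConfig{
		SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1, // 2KiB test sectors
		TaskTypes: []sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTPreCommit1},
	}, rstor, lstor, idx)

	info, err := w.Info(context.TODO())
	if err != nil {
		return err
	}
	fmt.Println("worker", info.Hostname, "CPUs:", info.Resources.CPUs)
	return nil
}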

extern/sector-storage/manager.go vendored Normal file

@ -0,0 +1,508 @@
package sectorstorage
import (
"context"
"errors"
"github.com/filecoin-project/sector-storage/fsutil"
"io"
"net/http"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
var log = logging.Logger("advmgr")
var ErrNoWorkers = errors.New("no suitable workers found")
type URLs []string
type Worker interface {
ffiwrapper.StorageSealer
MoveStorage(ctx context.Context, sector abi.SectorID) error
Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error
UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
// Returns paths accessible to the worker
Paths(context.Context) ([]stores.StoragePath, error)
Info(context.Context) (storiface.WorkerInfo, error)
// returns channel signalling worker shutdown
Closing(context.Context) (<-chan struct{}, error)
Close() error
}
type SectorManager interface {
SectorSize() abi.SectorSize
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
ffiwrapper.StorageSealer
storage.Prover
FaultTracker
}
type WorkerID uint64
type Manager struct {
scfg *ffiwrapper.Config
ls stores.LocalStorage
storage *stores.Remote
localStore *stores.Local
remoteHnd *stores.FetchHandler
index stores.SectorIndex
sched *scheduler
storage.Prover
}
type SealerConfig struct {
ParallelFetchLimit int
// Local worker config
AllowPreCommit1 bool
AllowPreCommit2 bool
AllowCommit bool
AllowUnseal bool
}
type StorageAuth http.Header
func New(ctx context.Context, ls stores.LocalStorage, si stores.SectorIndex, cfg *ffiwrapper.Config, sc SealerConfig, urls URLs, sa StorageAuth) (*Manager, error) {
lstor, err := stores.NewLocal(ctx, ls, si, urls)
if err != nil {
return nil, err
}
prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor, index: si}, cfg)
if err != nil {
return nil, xerrors.Errorf("creating prover instance: %w", err)
}
stor := stores.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit)
m := &Manager{
scfg: cfg,
ls: ls,
storage: stor,
localStore: lstor,
remoteHnd: &stores.FetchHandler{Local: lstor},
index: si,
sched: newScheduler(cfg.SealProofType),
Prover: prover,
}
go m.sched.runSched()
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTReadUnsealed,
}
if sc.AllowPreCommit1 {
localTasks = append(localTasks, sealtasks.TTPreCommit1)
}
if sc.AllowPreCommit2 {
localTasks = append(localTasks, sealtasks.TTPreCommit2)
}
if sc.AllowCommit {
localTasks = append(localTasks, sealtasks.TTCommit2)
}
if sc.AllowUnseal {
localTasks = append(localTasks, sealtasks.TTUnseal)
}
err = m.AddWorker(ctx, NewLocalWorker(WorkerConfig{
SealProof: cfg.SealProofType,
TaskTypes: localTasks,
}, stor, lstor, si))
if err != nil {
return nil, xerrors.Errorf("adding local worker: %w", err)
}
return m, nil
}
func (m *Manager) AddLocalStorage(ctx context.Context, path string) error {
path, err := homedir.Expand(path)
if err != nil {
return xerrors.Errorf("expanding local path: %w", err)
}
if err := m.localStore.OpenPath(ctx, path); err != nil {
return xerrors.Errorf("opening local path: %w", err)
}
if err := m.ls.SetStorage(func(sc *stores.StorageConfig) {
sc.StoragePaths = append(sc.StoragePaths, stores.LocalPath{Path: path})
}); err != nil {
return xerrors.Errorf("get storage config: %w", err)
}
return nil
}
func (m *Manager) AddWorker(ctx context.Context, w Worker) error {
info, err := w.Info(ctx)
if err != nil {
return xerrors.Errorf("getting worker info: %w", err)
}
m.sched.newWorkers <- &workerHandle{
w: w,
wt: &workTracker{
running: map[uint64]storiface.WorkerJob{},
},
info: info,
preparing: &activeResources{},
active: &activeResources{},
}
return nil
}
func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
m.remoteHnd.ServeHTTP(w, r)
}
func (m *Manager) SectorSize() abi.SectorSize {
sz, _ := m.scfg.SealProofType.SectorSize()
return sz
}
func schedNop(context.Context, Worker) error {
return nil
}
func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) func(context.Context, Worker) error {
return func(ctx context.Context, worker Worker) error {
return worker.Fetch(ctx, sector, ft, ptype, am)
}
}
func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
// passing 0 spt because we only need it when allowFetch is true
best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
}
var selector WorkerSelector
if len(best) == 0 { // new
selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
} else { // append to existing
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
}
var readOk bool
if len(best) > 0 {
// There is an unsealed sector; see if we can read from it
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
return err
})
if err != nil {
return xerrors.Errorf("reading piece from sealed sector: %w", err)
}
if readOk {
return nil
}
}
unsealFetch := func(ctx context.Context, worker Worker) error {
if err := worker.Fetch(ctx, sector, stores.FTSealed|stores.FTCache, stores.PathSealing, stores.AcquireCopy); err != nil {
return xerrors.Errorf("copy sealed/cache sector data: %w", err)
}
if len(best) > 0 {
if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil {
return xerrors.Errorf("copy unsealed sector data: %w", err)
}
}
return nil
}
err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)
})
if err != nil {
return err
}
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReadUnsealed, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
readOk, err = w.ReadPiece(ctx, sink, sector, offset, size)
return err
})
if err != nil {
return xerrors.Errorf("reading piece from sealed sector: %w", err)
}
if !readOk {
return xerrors.Errorf("failed to read unsealed piece")
}
return nil
}
func (m *Manager) NewSector(ctx context.Context, sector abi.SectorID) error {
log.Warnf("stub NewSector")
return nil
}
func (m *Manager) AddPiece(ctx context.Context, sector abi.SectorID, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTUnsealed); err != nil {
return abi.PieceInfo{}, xerrors.Errorf("acquiring sector lock: %w", err)
}
var selector WorkerSelector
var err error
if len(existingPieces) == 0 { // new
selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
} else { // use existing
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
}
var out abi.PieceInfo
err = m.sched.Schedule(ctx, sector, sealtasks.TTAddPiece, selector, schedNop, func(ctx context.Context, w Worker) error {
p, err := w.AddPiece(ctx, sector, existingPieces, sz, r)
if err != nil {
return err
}
out = p
return nil
})
return out, err
}
func (m *Manager) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTSealed|stores.FTCache); err != nil {
return nil, xerrors.Errorf("acquiring sector lock: %w", err)
}
// TODO: also consider where the unsealed data sits
selector := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathSealing)
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit1, selector, schedFetch(sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
p, err := w.SealPreCommit1(ctx, sector, ticket, pieces)
if err != nil {
return err
}
out = p
return nil
})
return out, err
}
func (m *Manager) SealPreCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.PreCommit1Out) (out storage.SectorCids, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil {
return storage.SectorCids{}, xerrors.Errorf("acquiring sector lock: %w", err)
}
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, true)
err = m.sched.Schedule(ctx, sector, sealtasks.TTPreCommit2, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
p, err := w.SealPreCommit2(ctx, sector, phase1Out)
if err != nil {
return err
}
out = p
return nil
})
return out, err
}
func (m *Manager) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (out storage.Commit1Out, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTSealed, stores.FTCache); err != nil {
return storage.Commit1Out{}, xerrors.Errorf("acquiring sector lock: %w", err)
}
// NOTE: We set allowFetch to false so that we always execute on a worker
// with direct access to the data. We want to do that because this step is
// generally very cheap / fast, and transferring data is not worth the effort
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit1, selector, schedFetch(sector, stores.FTCache|stores.FTSealed, stores.PathSealing, stores.AcquireMove), func(ctx context.Context, w Worker) error {
p, err := w.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
if err != nil {
return err
}
out = p
return nil
})
return out, err
}
func (m *Manager) SealCommit2(ctx context.Context, sector abi.SectorID, phase1Out storage.Commit1Out) (out storage.Proof, err error) {
selector := newTaskSelector()
err = m.sched.Schedule(ctx, sector, sealtasks.TTCommit2, selector, schedNop, func(ctx context.Context, w Worker) error {
p, err := w.SealCommit2(ctx, sector, phase1Out)
if err != nil {
return err
}
out = p
return nil
})
return out, err
}
func (m *Manager) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
unsealed := stores.FTUnsealed
{
unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("finding unsealed sector: %w", err)
}
if len(unsealedStores) == 0 { // in some edge cases the unsealed sector may not exist already; that's fine
unsealed = stores.FTNone
}
}
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathSealing, stores.AcquireMove),
func(ctx context.Context, w Worker) error {
return w.FinalizeSector(ctx, sector, keepUnsealed)
})
if err != nil {
return err
}
fetchSel := newAllocSelector(m.index, stores.FTCache|stores.FTSealed, stores.PathStorage)
moveUnsealed := unsealed
{
if len(keepUnsealed) == 0 {
moveUnsealed = stores.FTNone
}
}
err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel,
schedFetch(sector, stores.FTCache|stores.FTSealed|moveUnsealed, stores.PathStorage, stores.AcquireMove),
func(ctx context.Context, w Worker) error {
return w.MoveStorage(ctx, sector)
})
if err != nil {
return xerrors.Errorf("moving sector to storage: %w", err)
}
return nil
}
func (m *Manager) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
log.Warnw("ReleaseUnsealed todo")
return nil
}
func (m *Manager) Remove(ctx context.Context, sector abi.SectorID) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector, stores.FTNone, stores.FTSealed|stores.FTUnsealed|stores.FTCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
unsealed := stores.FTUnsealed
{
unsealedStores, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
if err != nil {
return xerrors.Errorf("finding unsealed sector: %w", err)
}
if len(unsealedStores) == 0 { // may already have been removed
unsealed = stores.FTNone
}
}
selector := newExistingSelector(m.index, sector, stores.FTCache|stores.FTSealed, false)
return m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector,
schedFetch(sector, stores.FTCache|stores.FTSealed|unsealed, stores.PathStorage, stores.AcquireMove),
func(ctx context.Context, w Worker) error {
return w.Remove(ctx, sector)
})
}
func (m *Manager) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
l, err := m.localStore.Local(ctx)
if err != nil {
return nil, err
}
out := map[stores.ID]string{}
for _, st := range l {
out[st.ID] = st.LocalPath
}
return out, nil
}
func (m *Manager) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) {
return m.storage.FsStat(ctx, id)
}
func (m *Manager) SchedDiag(ctx context.Context) (interface{}, error) {
return m.sched.Info(ctx)
}
func (m *Manager) Close(ctx context.Context) error {
return m.sched.Close(ctx)
}
var _ SectorManager = &Manager{}
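For orientation, a minimal sketch (hypothetical, not part of this commit) of standing up a Manager with only the built-in local worker, mirroring what New does with the SealerConfig switches; ls is assumed to be a stores.LocalStorage implementation provided by the caller:

package sectorstorage

import (
	"context"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/sector-storage/ffiwrapper"
	"github.com/filecoin-project/sector-storage/stores"
)

// exampleManager is a hypothetical helper showing the constructor call.
func exampleManager(ctx context.Context, ls stores.LocalStorage) (*Manager, error) {
	si := stores.NewIndex() // in-memory index, as used in manager_test.go
	return New(ctx, ls, si,
		&ffiwrapper.Config{SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1},
		SealerConfig{
			ParallelFetchLimit: 10, // assumed value; tune per deployment
			AllowPreCommit1:    true,
			AllowPreCommit2:    true,
			AllowCommit:        true,
			AllowUnseal:        true,
		},
		nil, // URLs: no remote fetch endpoints in this sketch
		nil, // StorageAuth: no auth headers in this sketch
	)
}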

extern/sector-storage/manager_test.go vendored Normal file

@ -0,0 +1,152 @@
package sectorstorage
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/filecoin-project/sector-storage/sealtasks"
logging "github.com/ipfs/go-log"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/stores"
)
func init() {
logging.SetAllLoggers(logging.LevelDebug)
}
type testStorage stores.StorageConfig
func (t testStorage) DiskUsage(path string) (int64, error) {
return 1, nil // close enough
}
func newTestStorage(t *testing.T) *testStorage {
tp, err := ioutil.TempDir(os.TempDir(), "sector-storage-test-")
require.NoError(t, err)
{
b, err := json.MarshalIndent(&stores.LocalStorageMeta{
ID: stores.ID(uuid.New().String()),
Weight: 1,
CanSeal: true,
CanStore: true,
}, "", " ")
require.NoError(t, err)
err = ioutil.WriteFile(filepath.Join(tp, "sectorstore.json"), b, 0644)
require.NoError(t, err)
}
return &testStorage{
StoragePaths: []stores.LocalPath{
{Path: tp},
},
}
}
func (t testStorage) cleanup() {
for _, path := range t.StoragePaths {
if err := os.RemoveAll(path.Path); err != nil {
fmt.Println("Cleanup error:", err)
}
}
}
func (t testStorage) GetStorage() (stores.StorageConfig, error) {
return stores.StorageConfig(t), nil
}
func (t *testStorage) SetStorage(f func(*stores.StorageConfig)) error {
f((*stores.StorageConfig)(t))
return nil
}
func (t *testStorage) Stat(path string) (fsutil.FsStat, error) {
return fsutil.Statfs(path)
}
var _ stores.LocalStorage = &testStorage{}
func newTestMgr(ctx context.Context, t *testing.T) (*Manager, *stores.Local, *stores.Remote, *stores.Index) {
st := newTestStorage(t)
defer st.cleanup()
si := stores.NewIndex()
cfg := &ffiwrapper.Config{
SealProofType: abi.RegisteredSealProof_StackedDrg2KiBV1,
}
lstor, err := stores.NewLocal(ctx, st, si, nil)
require.NoError(t, err)
prover, err := ffiwrapper.New(&readonlyProvider{stor: lstor}, cfg)
require.NoError(t, err)
stor := stores.NewRemote(lstor, si, nil, 6000)
m := &Manager{
scfg: cfg,
ls: st,
storage: stor,
localStore: lstor,
remoteHnd: &stores.FetchHandler{Local: lstor},
index: si,
sched: newScheduler(cfg.SealProofType),
Prover: prover,
}
go m.sched.runSched()
return m, lstor, stor, si
}
func TestSimple(t *testing.T) {
logging.SetAllLoggers(logging.LevelDebug)
ctx := context.Background()
m, lstor, _, _ := newTestMgr(ctx, t)
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch,
}
err := m.AddWorker(ctx, newTestWorker(WorkerConfig{
SealProof: abi.RegisteredSealProof_StackedDrg2KiBV1,
TaskTypes: localTasks,
}, lstor))
require.NoError(t, err)
sid := abi.SectorID{Miner: 1000, Number: 1}
pi, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127)))
require.NoError(t, err)
require.Equal(t, abi.PaddedPieceSize(1024), pi.Size)
piz, err := m.AddPiece(ctx, sid, nil, 1016, bytes.NewReader(make([]byte, 1016)[:]))
require.NoError(t, err)
require.Equal(t, abi.PaddedPieceSize(1024), piz.Size)
pieces := []abi.PieceInfo{pi, piz}
ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
_, err = m.SealPreCommit1(ctx, sid, ticket, pieces)
require.NoError(t, err)
}

extern/sector-storage/mock/mock.go vendored Normal file

@ -0,0 +1,458 @@
package mock
import (
"bytes"
"context"
"fmt"
"io"
"math/rand"
"sync"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
"golang.org/x/xerrors"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/storiface"
)
var log = logging.Logger("sbmock")
type SectorMgr struct {
sectors map[abi.SectorID]*sectorState
pieces map[cid.Cid][]byte
sectorSize abi.SectorSize
nextSectorID abi.SectorNumber
proofType abi.RegisteredSealProof
lk sync.Mutex
}
type mockVerif struct{}
func NewMockSectorMgr(ssize abi.SectorSize, genesisSectors []abi.SectorID) *SectorMgr {
rt, err := ffiwrapper.SealProofTypeFromSectorSize(ssize)
if err != nil {
panic(err)
}
sectors := make(map[abi.SectorID]*sectorState)
for _, sid := range genesisSectors {
sectors[sid] = &sectorState{
failed: false,
state: stateCommit,
}
}
return &SectorMgr{
sectors: sectors,
pieces: map[cid.Cid][]byte{},
sectorSize: ssize,
nextSectorID: 5,
proofType: rt,
}
}
const (
statePacking = iota
statePreCommit
stateCommit // nolint
)
type sectorState struct {
pieces []cid.Cid
failed bool
state int
lk sync.Mutex
}
func (mgr *SectorMgr) NewSector(ctx context.Context, sector abi.SectorID) error {
return nil
}
func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorId abi.SectorID, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
log.Warn("Add piece: ", sectorId, size, mgr.proofType)
var b bytes.Buffer
tr := io.TeeReader(r, &b)
c, err := ffiwrapper.GeneratePieceCIDFromFile(mgr.proofType, tr, size)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("failed to generate piece cid: %w", err)
}
log.Warn("Generated Piece CID: ", c)
mgr.lk.Lock()
mgr.pieces[c] = b.Bytes()
ss, ok := mgr.sectors[sectorId]
if !ok {
ss = &sectorState{
state: statePacking,
}
mgr.sectors[sectorId] = ss
}
mgr.lk.Unlock()
ss.lk.Lock()
ss.pieces = append(ss.pieces, c)
ss.lk.Unlock()
return abi.PieceInfo{
Size: size.Padded(),
PieceCID: c,
}, nil
}
func (mgr *SectorMgr) SectorSize() abi.SectorSize {
return mgr.sectorSize
}
func (mgr *SectorMgr) AcquireSectorNumber() (abi.SectorNumber, error) {
mgr.lk.Lock()
defer mgr.lk.Unlock()
id := mgr.nextSectorID
mgr.nextSectorID++
return id, nil
}
func (mgr *SectorMgr) SealPreCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {
mgr.lk.Lock()
ss, ok := mgr.sectors[sid]
mgr.lk.Unlock()
if !ok {
return nil, xerrors.Errorf("no sector with id %d in storage", sid)
}
ss.lk.Lock()
defer ss.lk.Unlock()
ussize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
// TODO: verify that ss.pieces matches the passed-in pieces
var sum abi.UnpaddedPieceSize
for _, p := range pieces {
sum += p.Size.Unpadded()
}
if sum != ussize {
return nil, xerrors.Errorf("aggregated piece sizes don't match up: %d != %d", sum, ussize)
}
if ss.state != statePacking {
return nil, xerrors.Errorf("cannot call pre-seal on sector not in 'packing' state")
}
opFinishWait(ctx)
ss.state = statePreCommit
pis := make([]abi.PieceInfo, len(ss.pieces))
for i, piece := range ss.pieces {
pis[i] = abi.PieceInfo{
Size: pieces[i].Size,
PieceCID: piece,
}
}
commd, err := MockVerifier.GenerateDataCommitment(mgr.proofType, pis)
if err != nil {
return nil, err
}
_, _, cc, err := commcid.CIDToCommitment(commd)
if err != nil {
panic(err)
}
cc[0] ^= 'd'
return cc, nil
}
func (mgr *SectorMgr) SealPreCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.PreCommit1Out) (cids storage.SectorCids, err error) {
db := []byte(string(phase1Out))
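// Undo the 'd' byte-flip applied at the end of SealPreCommit1 to recover the raw data commitment.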
db[0] ^= 'd'
d, _ := commcid.DataCommitmentV1ToCID(db)
commr := make([]byte, 32)
for i := range db {
commr[32-(i+1)] = db[i]
}
commR, _ := commcid.ReplicaCommitmentV1ToCID(commr)
return storage.SectorCids{
Unsealed: d,
Sealed: commR,
}, nil
}
func (mgr *SectorMgr) SealCommit1(ctx context.Context, sid abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (output storage.Commit1Out, err error) {
mgr.lk.Lock()
ss, ok := mgr.sectors[sid]
mgr.lk.Unlock()
if !ok {
return nil, xerrors.Errorf("no such sector %d", sid)
}
ss.lk.Lock()
defer ss.lk.Unlock()
if ss.failed {
return nil, xerrors.Errorf("[mock] cannot commit failed sector %d", sid)
}
if ss.state != statePreCommit {
return nil, xerrors.Errorf("cannot commit sector that has not been precommitted")
}
opFinishWait(ctx)
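// Derive a deterministic fake proof from the commitments, ticket, seed and sector number; mockVerif.VerifySeal recomputes these bytes (the sector-number XOR is cancelled out again in SealCommit2).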
var out [32]byte
for i := range out {
out[i] = cids.Unsealed.Bytes()[i] + cids.Sealed.Bytes()[31-i] - ticket[i]*seed[i] ^ byte(sid.Number&0xff)
}
return out[:], nil
}
func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid abi.SectorID, phase1Out storage.Commit1Out) (proof storage.Proof, err error) {
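// XOR the sector-number byte back out of the phase-1 output, leaving exactly the bytes that mockVerif.VerifySeal checks.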
var out [32]byte
for i := range out {
out[i] = phase1Out[i] ^ byte(sid.Number&0xff)
}
return out[:], nil
}
// Test Instrumentation Methods
func (mgr *SectorMgr) MarkFailed(sid abi.SectorID, failed bool) error {
mgr.lk.Lock()
defer mgr.lk.Unlock()
ss, ok := mgr.sectors[sid]
if !ok {
return fmt.Errorf("no such sector in storage")
}
ss.failed = failed
return nil
}
func opFinishWait(ctx context.Context) {
val, ok := ctx.Value("opfinish").(chan struct{})
if !ok {
return
}
<-val
}
func AddOpFinish(ctx context.Context) (context.Context, func()) {
done := make(chan struct{})
return context.WithValue(ctx, "opfinish", done), func() {
close(done)
}
}
func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, error) {
return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil
}
func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []abi.SectorInfo, randomness abi.PoStRandomness) ([]abi.PoStProof, []abi.SectorID, error) {
si := make([]abi.SectorInfo, 0, len(sectorInfo))
var skipped []abi.SectorID
for _, info := range sectorInfo {
sid := abi.SectorID{
Miner: minerID,
Number: info.SectorNumber,
}
_, found := mgr.sectors[sid]
if found && !mgr.sectors[sid].failed {
si = append(si, info)
} else {
skipped = append(skipped, sid)
}
}
return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil
}
func generateFakePoSt(sectorInfo []abi.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []abi.PoStProof {
sectors := abi.NewBitField()
for _, info := range sectorInfo {
sectors.Set(uint64(info.SectorNumber))
}
wp, err := rpt(sectorInfo[0].SealProof)
if err != nil {
panic(err)
}
var proofBuf bytes.Buffer
_, err = proofBuf.Write(randomness)
if err != nil {
panic(err)
}
if err := sectors.MarshalCBOR(&proofBuf); err != nil {
panic(err)
}
return []abi.PoStProof{
{
PoStProof: wp,
ProofBytes: proofBuf.Bytes(),
},
}
}
func (mgr *SectorMgr) ReadPiece(ctx context.Context, w io.Writer, sectorID abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error {
if len(mgr.sectors[sectorID].pieces) > 1 || offset != 0 {
panic("implme")
}
_, err := io.CopyN(w, bytes.NewReader(mgr.pieces[mgr.sectors[sectorID].pieces[0]]), int64(size))
return err
}
func (mgr *SectorMgr) StageFakeData(mid abi.ActorID) (abi.SectorID, []abi.PieceInfo, error) {
usize := abi.PaddedPieceSize(mgr.sectorSize).Unpadded()
sid, err := mgr.AcquireSectorNumber()
if err != nil {
return abi.SectorID{}, nil, err
}
buf := make([]byte, usize)
rand.Read(buf)
id := abi.SectorID{
Miner: mid,
Number: sid,
}
pi, err := mgr.AddPiece(context.TODO(), id, nil, usize, bytes.NewReader(buf))
if err != nil {
return abi.SectorID{}, nil, err
}
return id, []abi.PieceInfo{pi}, nil
}
func (mgr *SectorMgr) FinalizeSector(context.Context, abi.SectorID, []storage.Range) error {
return nil
}
func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
return nil
}
func (mgr *SectorMgr) Remove(ctx context.Context, sector abi.SectorID) error {
mgr.lk.Lock()
defer mgr.lk.Unlock()
if _, has := mgr.sectors[sector]; !has {
return xerrors.Errorf("sector not found")
}
delete(mgr.sectors, sector)
return nil
}
func (mgr *SectorMgr) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, ids []abi.SectorID) ([]abi.SectorID, error) {
var bad []abi.SectorID
for _, sid := range ids {
_, found := mgr.sectors[sid]
if !found || mgr.sectors[sid].failed {
bad = append(bad, sid)
}
}
return bad, nil
}
func (m mockVerif) VerifySeal(svi abi.SealVerifyInfo) (bool, error) {
if len(svi.Proof) != 32 { // Real ones are longer, but this should be fine
return false, nil
}
for i, b := range svi.Proof {
if b != svi.UnsealedCID.Bytes()[i]+svi.SealedCID.Bytes()[31-i]-svi.InteractiveRandomness[i]*svi.Randomness[i] {
return false, nil
}
}
return true, nil
}
func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) {
return true, nil
}
func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, xerrors.Errorf("expected 1 proof entry")
}
proof := info.Proofs[0]
if !bytes.Equal(proof.ProofBytes[:len(info.Randomness)], info.Randomness) {
return false, xerrors.Errorf("bad randomness")
}
sectors := abi.NewBitField()
if err := sectors.UnmarshalCBOR(bytes.NewReader(proof.ProofBytes[len(info.Randomness):])); err != nil {
return false, xerrors.Errorf("unmarshaling sectors bitfield from \"proof\": %w", err)
}
challenged := abi.NewBitField()
for _, sector := range info.ChallengedSectors {
challenged.Set(uint64(sector.SectorNumber))
}
{
b1, err := sectors.MarshalJSON()
if err != nil {
return false, err
}
b2, err := challenged.MarshalJSON()
if err != nil {
return false, err
}
if !bytes.Equal(b1, b2) {
return false, xerrors.Errorf("proven and challenged sector sets didn't match: %s != !s", string(b1), string(b2))
}
}
return true, nil
}
func (m mockVerif) GenerateDataCommitment(pt abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
return ffiwrapper.GenerateUnsealedCID(pt, pieces)
}
func (m mockVerif) GenerateWinningPoStSectorChallenge(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, eligibleSectorCount uint64) ([]uint64, error) {
return []uint64{0}, nil
}
var MockVerifier = mockVerif{}
var _ storage.Sealer = &SectorMgr{}
var _ ffiwrapper.Verifier = MockVerifier
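An end-to-end sketch of the mock seal pipeline (hypothetical, not part of this commit). Note that SealCommit1 above indexes ticket[i] and seed[i] for i in 0..31, so both must be at least 32 bytes here, unlike the 8-byte ticket that suffices for SealPreCommit1 alone in manager_test.go:

package mock

import (
	"context"
	"fmt"

	"github.com/filecoin-project/specs-actors/actors/abi"
)

// exampleSealFlow is a hypothetical walkthrough of the mock pipeline.
func exampleSealFlow() error {
	ctx := context.TODO()
	mgr := NewMockSectorMgr(2048, nil) // 2KiB sectors, no genesis sectors

	sid, pieces, err := mgr.StageFakeData(1000) // 1000 is an arbitrary miner ID
	if err != nil {
		return err
	}

	ticket := make(abi.SealRandomness, 32)          // 32 bytes: see SealCommit1
	seed := make(abi.InteractiveSealRandomness, 32) // likewise

	pc1, err := mgr.SealPreCommit1(ctx, sid, ticket, pieces)
	if err != nil {
		return err
	}
	cids, err := mgr.SealPreCommit2(ctx, sid, pc1)
	if err != nil {
		return err
	}
	c1, err := mgr.SealCommit1(ctx, sid, ticket, seed, pieces, cids)
	if err != nil {
		return err
	}
	proof, err := mgr.SealCommit2(ctx, sid, c1)
	if err != nil {
		return err
	}
	fmt.Println("fake proof length:", len(proof)) // always 32 in this mock
	return nil
}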

extern/sector-storage/mock/mock_test.go vendored Normal file

@ -0,0 +1,45 @@
package mock
import (
"context"
"testing"
"time"
"github.com/filecoin-project/specs-actors/actors/abi"
)
func TestOpFinish(t *testing.T) {
sb := NewMockSectorMgr(2048, nil)
sid, pieces, err := sb.StageFakeData(123)
if err != nil {
t.Fatal(err)
}
ctx, done := AddOpFinish(context.TODO())
finished := make(chan struct{})
go func() {
_, err := sb.SealPreCommit1(ctx, sid, abi.SealRandomness{}, pieces)
if err != nil {
t.Error(err)
return
}
close(finished)
}()
select {
case <-finished:
t.Fatal("should not finish until we tell it to")
case <-time.After(time.Second / 2):
}
done()
select {
case <-finished:
case <-time.After(time.Second / 2):
t.Fatal("should finish after we tell it to")
}
}

extern/sector-storage/mock/util.go vendored Normal file

@ -0,0 +1,9 @@
package mock
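// CommDR derives a fake "replica commitment" by flipping every bit of in; in is expected to be at most 32 bytes (the size of the output array).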
func CommDR(in []byte) (out [32]byte) {
for i, b := range in {
out[i] = ^b
}
return out
}

extern/sector-storage/parameters.json vendored Normal file

@ -0,0 +1,152 @@
{
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
"cid": "QmeDRyxek34F1H6xJY6AkFdWvPsy5F6dKTrebV3ZtWT4ky",
"digest": "f5827f2d8801c62c831e0f972f6dc8bb",
"sector_size": 2048
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": {
"cid": "QmUw1ZmG4BBbX19MsbH3zAEGKUc42iFJc5ZAyomDHeJTsA",
"digest": "398fecdb4b2de445125852bc3c080b35",
"sector_size": 2048
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": {
"cid": "QmUeNKp9YZpiAFm81RV5KuxH1FDGJx2DuwcbU2XNSZLLSv",
"digest": "2b6d2972ac9e862e8134d98fb695b0c5",
"sector_size": 536870912
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": {
"cid": "QmQaQmTXX995Akd66ggtJY5bNx6Gkxk8P34JTdMMq8393G",
"digest": "3688c9eb256b7b17f411dad78d5ef74a",
"sector_size": 536870912
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": {
"cid": "QmfEYTMSkwGJTumQx26iKXGNKiYh3mmAC4SkdybZpJCj5p",
"digest": "09bff16aed893349d94485cfae366a9c",
"sector_size": 2048
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": {
"cid": "QmP4ThPieSUJyRanjibWpT5R5cCMzMAU4j8Y7kBn7CSW1Q",
"digest": "142f2f7e8f1b1779290315cabfd2c803",
"sector_size": 2048
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": {
"cid": "QmcAixrHsz29DgvtZiMc2kQjvPRvWxYUp36QYmRDZbmREm",
"digest": "8f987f64d434365562180b96ec12e299",
"sector_size": 8388608
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": {
"cid": "QmT4iFnbL6r4txS5PXsiV7NTzbhCxHy54PvdkJJGV2VFXb",
"digest": "94b6c24ac01924f4feeecedd16b5d77d",
"sector_size": 8388608
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": {
"cid": "QmbjFst6SFCK1KsTQrfwPdxf3VTNa1raed574tEZZ9PoyQ",
"digest": "2c245fe8179839dd6c6cdea207c67ae8",
"sector_size": 8388608
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": {
"cid": "QmQJKmvZN1a5cQ1Nw6CDyXs3nuRPzvyU5NvCFMUL2BfcZC",
"digest": "56ae47bfda53bb8d22981ed8d8d27d72",
"sector_size": 8388608
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": {
"cid": "QmQCABxeTpdvXTyjDyk7nPBxkQzCh7MXfGztWnSXEPKMLW",
"digest": "7e6b2eb5ecbb11ac651ad66ebbb2075a",
"sector_size": 536870912
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": {
"cid": "QmPBweyugh5Sx4umk8ULhgEGbjY8xmWLfU6M7EMpc8Mad6",
"digest": "94a8d9e25a9ab9674d339833664eba25",
"sector_size": 536870912
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": {
"cid": "QmY5yax1E9KymBnCeHksE9Zi8NieZbmwcpoDGoabkeeb9h",
"digest": "c909ea9e3fe25ab9b391a64593afdbba",
"sector_size": 34359738368
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": {
"cid": "QmXnPo4yH5mwMguwrvqgRfduSttbmPrXtbBfbwU21wQWHt",
"digest": "caf900461e988bbf86dbcaca087b7864",
"sector_size": 34359738368
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": {
"cid": "QmZtzzPWwmZEgR7MSMvXRbt9KVK8k4XZ5RLWHybHJW9SdE",
"digest": "a2844f0703f186d143a06146a04577d8",
"sector_size": 34359738368
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": {
"cid": "QmWxEA7EdQCUJTzjNpxg5XTF45D2uVyYnN1QRUb5TRYU8M",
"digest": "2306247a1e616dbe07f01b88196c2044",
"sector_size": 34359738368
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": {
"cid": "QmP676KwuvyF9Y64uJnXvLtvD1xcuWQ6wD23RzYtQ6dd4f",
"digest": "215b1c667a4f46a1d0178338df568615",
"sector_size": 68719476736
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": {
"cid": "QmPvPwbJtcSGyqB1rQJhSF5yvFbX9ZBSsHVej5F8JUyHUJ",
"digest": "0c9c423b28b1455fcbc329a1045fd4dd",
"sector_size": 68719476736
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": {
"cid": "QmUxPQfvckzm1t6MFRdDZ1fDK5UJzAjK7pTZ97cwyachdr",
"digest": "965132f51ae445b0e6d32692b7561995",
"sector_size": 68719476736
},
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": {
"cid": "QmTxq2EBnQWb5R8tS4MHdchj4vNfLYGoSXxwJFvs5xgW4K",
"digest": "fc8c3d26e0e56373ad96cb41520d55a6",
"sector_size": 68719476736
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": {
"cid": "QmRjgZHERgqGoRagR788Kh6ybi26csVYa8mqbqhmZm57Jx",
"digest": "cfc7b0897d1eee48c586f7beb89e67f7",
"sector_size": 2048
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": {
"cid": "QmNjvnvFP7KgovHUddULoB19fBHT81iz7NcUbzEHZUUPsm",
"digest": "fb59bd061c987eac7068008c44de346b",
"sector_size": 2048
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": {
"cid": "QmTpRPBA4dt8fgGpcVzi4L1KA1U2eBHCE8WVmS2GUygMvT",
"digest": "36d465915b0afbf96bd08e7915e00952",
"sector_size": 536870912
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": {
"cid": "QmRzDyVfQCLsxspoVsed5bcQRsG6KiktngJfcNBL3TJPZe",
"digest": "99d16df0eb6a7e227a4f4570c4f6b6f1",
"sector_size": 536870912
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": {
"cid": "QmV8ZjTSGzDUWmFvsq9NSyPBR7eDDUcvCPNgj2yE7HMAFu",
"digest": "34f3ddf1d1c9f41c0cd73b91e8b4bc27",
"sector_size": 8388608
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": {
"cid": "QmTa3VbjTiqJWU6r4WKayaQrUaaBsrpp5UDqYvPDd2C5hs",
"digest": "ec62d59651daa5631d3d1e9c782dd940",
"sector_size": 8388608
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": {
"cid": "Qmf8ngfArxrv9tFWDqBcNegdBMymvuakwyHKd1pbW3pbsb",
"digest": "a16d6f4c6424fb280236739f84b24f97",
"sector_size": 34359738368
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": {
"cid": "QmfQgVFerArJ6Jupwyc9tKjLD9n1J9ajLHBdpY465tRM7M",
"digest": "7a139d82b8a02e35279d657e197f5c1f",
"sector_size": 34359738368
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": {
"cid": "QmfDha8271nXJn14Aq3qQeghjMBWbs6HNSGa6VuzCVk4TW",
"digest": "5d3cd3f107a3bea8a96d1189efd2965c",
"sector_size": 68719476736
},
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": {
"cid": "QmRVtTtiFzHJTHurYzaCvetGAchux9cktixT4aGHthN6Zt",
"digest": "62c366405404e60f171e661492740b1c",
"sector_size": 68719476736
}
}

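The manifest above maps each Groth parameter (.params) and verifying key (.vk) file to its IPFS CID, content digest, and the sector size it serves. A minimal sketch of decoding it, assuming only the structure visible above (the struct name and field tags are illustrative):
package main
import (
	"encoding/json"
	"fmt"
	"os"
)
// paramFile mirrors one entry of parameters.json.
type paramFile struct {
	Cid        string `json:"cid"`
	Digest     string `json:"digest"`
	SectorSize uint64 `json:"sector_size"`
}
func main() {
	data, err := os.ReadFile("extern/sector-storage/parameters.json")
	if err != nil {
		panic(err)
	}
	var params map[string]paramFile
	if err := json.Unmarshal(data, &params); err != nil {
		panic(err)
	}
	// List only the 2KiB files used by dev/test networks.
	for name, info := range params {
		if info.SectorSize == 2048 {
			fmt.Println(name, info.Cid, info.Digest)
		}
	}
}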
45
extern/sector-storage/request_queue.go vendored Normal file
View File

@ -0,0 +1,45 @@
package sectorstorage
import "sort"
type requestQueue []*workerRequest
func (q requestQueue) Len() int { return len(q) }
func (q requestQueue) Less(i, j int) bool {
if q[i].priority != q[j].priority {
return q[i].priority > q[j].priority
}
if q[i].taskType != q[j].taskType {
return q[i].taskType.Less(q[j].taskType)
}
return q[i].sector.Number < q[j].sector.Number // optimize minerActor.NewSectors bitfield
}
func (q requestQueue) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
q[i].index = i
q[j].index = j
}
func (q *requestQueue) Push(x *workerRequest) {
n := len(*q)
item := x
item.index = n
*q = append(*q, item)
sort.Sort(q)
}
func (q *requestQueue) Remove(i int) *workerRequest {
old := *q
n := len(old)
item := old[i]
old[i] = old[n-1]
old[n-1] = nil
item.index = -1
*q = old[0 : n-1]
sort.Sort(q)
return item
}

62
extern/sector-storage/request_queue_test.go vendored Normal file
View File

@ -0,0 +1,62 @@
package sectorstorage
import (
"fmt"
"testing"
"github.com/filecoin-project/sector-storage/sealtasks"
)
func TestRequestQueue(t *testing.T) {
rq := &requestQueue{}
rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit2})
rq.Push(&workerRequest{taskType: sealtasks.TTPreCommit1})
rq.Push(&workerRequest{taskType: sealtasks.TTAddPiece})
dump := func(s string) {
fmt.Println("---")
fmt.Println(s)
for sqi := 0; sqi < rq.Len(); sqi++ {
task := (*rq)[sqi]
fmt.Println(sqi, task.taskType)
}
}
dump("start")
pt := rq.Remove(0)
dump("pop 1")
if pt.taskType != sealtasks.TTPreCommit2 {
t.Error("expected precommit2, got", pt.taskType)
}
pt = rq.Remove(0)
dump("pop 2")
if pt.taskType != sealtasks.TTPreCommit1 {
t.Error("expected precommit1, got", pt.taskType)
}
pt = rq.Remove(1)
dump("pop 3")
if pt.taskType != sealtasks.TTAddPiece {
t.Error("expected addpiece, got", pt.taskType)
}
pt = rq.Remove(0)
dump("pop 4")
if pt.taskType != sealtasks.TTPreCommit1 {
t.Error("expected precommit1, got", pt.taskType)
}
}

293
extern/sector-storage/resources.go vendored Normal file
View File

@ -0,0 +1,293 @@
package sectorstorage
import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/sealtasks"
)
type Resources struct {
MinMemory uint64 // What must be in RAM for decent performance
MaxMemory uint64 // Memory required (swap + ram)
Threads int // -1 = multithread
CanGPU bool
BaseMinMemory uint64 // What must be in RAM for decent performance (shared between threads)
}
func (r Resources) MultiThread() bool {
return r.Threads == -1
}
var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
sealtasks.TTAddPiece: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ // This is probably a bit conservative
MaxMemory: 64 << 30,
MinMemory: 64 << 30,
Threads: 1,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ // This is probably a bit conservative
MaxMemory: 32 << 30,
MinMemory: 32 << 30,
Threads: 1,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
Threads: 1,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
Threads: 1,
BaseMinMemory: 2 << 10,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
Threads: 1,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTPreCommit1: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 128 << 30,
MinMemory: 112 << 30,
Threads: 1,
BaseMinMemory: 10 << 20,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 64 << 30,
MinMemory: 56 << 30,
Threads: 1,
BaseMinMemory: 10 << 20,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 1 << 30,
MinMemory: 768 << 20,
Threads: 1,
BaseMinMemory: 1 << 20,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
Threads: 1,
BaseMinMemory: 2 << 10,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
Threads: 1,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTPreCommit2: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 64 << 30,
MinMemory: 64 << 30,
Threads: -1,
CanGPU: true,
BaseMinMemory: 60 << 30,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 32 << 30,
MinMemory: 32 << 30,
Threads: -1,
CanGPU: true,
BaseMinMemory: 30 << 30,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30,
Threads: -1,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
Threads: -1,
BaseMinMemory: 2 << 10,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
Threads: -1,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTCommit1: { // Very short (~100ms), so params are very light
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
Threads: 0,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
Threads: 0,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 1 << 30,
MinMemory: 1 << 30,
Threads: 0,
BaseMinMemory: 1 << 30,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
Threads: 0,
BaseMinMemory: 2 << 10,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
Threads: 0,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTCommit2: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 190 << 30, // TODO: Confirm
MinMemory: 60 << 30,
Threads: -1,
CanGPU: true,
BaseMinMemory: 64 << 30, // params
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory
MinMemory: 30 << 30,
Threads: -1,
CanGPU: true,
BaseMinMemory: 32 << 30, // params
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30,
Threads: 1, // This is fine
CanGPU: true,
BaseMinMemory: 10 << 30,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 2 << 10,
MinMemory: 2 << 10,
Threads: 1,
CanGPU: true,
BaseMinMemory: 2 << 10,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 8 << 20,
MinMemory: 8 << 20,
Threads: 1,
CanGPU: true,
BaseMinMemory: 8 << 20,
},
},
sealtasks.TTFetch: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
Threads: 0,
CanGPU: false,
BaseMinMemory: 0,
},
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
Threads: 0,
CanGPU: false,
BaseMinMemory: 0,
},
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
Threads: 0,
CanGPU: false,
BaseMinMemory: 0,
},
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
Threads: 0,
CanGPU: false,
BaseMinMemory: 0,
},
abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{
MaxMemory: 1 << 20,
MinMemory: 1 << 20,
Threads: 0,
CanGPU: false,
BaseMinMemory: 0,
},
},
}
func init() {
ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately
ResourceTable[sealtasks.TTReadUnsealed] = ResourceTable[sealtasks.TTFetch]
}

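The scheduler consumes this table by indexing on task type and then on seal proof type (see the needRes lookups in sched.go below). A minimal sketch of such a lookup; fitsAtAll is a hypothetical helper, not part of the diff:
package sectorstorage
import (
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/sector-storage/sealtasks"
)
// fitsAtAll reports whether a worker with the given physical memory
// could ever run the task/proof combination, ignoring resources that
// are currently reserved or in use.
func fitsAtAll(task sealtasks.TaskType, spt abi.RegisteredSealProof, memPhysical uint64) bool {
	res, ok := ResourceTable[task][spt]
	if !ok {
		return false
	}
	// Mirrors the MinMemory side of canHandleRequest (sched_resources.go),
	// without the per-worker reserved and in-use terms.
	return res.MinMemory+res.BaseMinMemory <= memPhysical
}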
40
extern/sector-storage/roprov.go vendored Normal file
View File

@ -0,0 +1,40 @@
package sectorstorage
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/stores"
)
type readonlyProvider struct {
index stores.SectorIndex
stor *stores.Local
spt abi.RegisteredSealProof
}
func (l *readonlyProvider) AcquireSector(ctx context.Context, id abi.SectorID, existing stores.SectorFileType, allocate stores.SectorFileType, sealing stores.PathType) (stores.SectorPaths, func(), error) {
if allocate != stores.FTNone {
return stores.SectorPaths{}, nil, xerrors.New("read-only storage")
}
ctx, cancel := context.WithCancel(ctx)
// use TryLock to avoid blocking
locked, err := l.index.StorageTryLock(ctx, id, existing, stores.FTNone)
if err != nil {
cancel()
return stores.SectorPaths{}, nil, xerrors.Errorf("acquiring sector lock: %w", err)
}
if !locked {
cancel()
return stores.SectorPaths{}, nil, xerrors.Errorf("failed to acquire sector lock")
}
p, _, err := l.stor.AcquireSector(ctx, id, l.spt, existing, allocate, sealing, stores.AcquireMove)
return p, cancel, err
}

703
extern/sector-storage/sched.go vendored Normal file
View File

@ -0,0 +1,703 @@
package sectorstorage
import (
"context"
"fmt"
"math/rand"
"sort"
"sync"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/storiface"
)
type schedPrioCtxKey int
var SchedPriorityKey schedPrioCtxKey
var DefaultSchedPriority = 0
var SelectorTimeout = 5 * time.Second
var (
SchedWindows = 2
)
func getPriority(ctx context.Context) int {
sp := ctx.Value(SchedPriorityKey)
if p, ok := sp.(int); ok {
return p
}
return DefaultSchedPriority
}
func WithPriority(ctx context.Context, priority int) context.Context {
return context.WithValue(ctx, SchedPriorityKey, priority)
}
const mib = 1 << 20
type WorkerAction func(ctx context.Context, w Worker) error
type WorkerSelector interface {
Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, a *workerHandle) (bool, error) // true if worker is acceptable for performing a task
Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) // true if a is preferred over b
}
type scheduler struct {
spt abi.RegisteredSealProof
workersLk sync.RWMutex
nextWorker WorkerID
workers map[WorkerID]*workerHandle
newWorkers chan *workerHandle
watchClosing chan WorkerID
workerClosing chan WorkerID
schedule chan *workerRequest
windowRequests chan *schedWindowRequest
// owned by the sh.runSched goroutine
schedQueue *requestQueue
openWindows []*schedWindowRequest
info chan func(interface{})
closing chan struct{}
closed chan struct{}
testSync chan struct{} // used for testing
}
type workerHandle struct {
w Worker
info storiface.WorkerInfo
preparing *activeResources
active *activeResources
lk sync.Mutex
// stats / tracking
wt *workTracker
// for sync manager goroutine closing
cleanupStarted bool
closedMgr chan struct{}
closingMgr chan struct{}
}
type schedWindowRequest struct {
worker WorkerID
done chan *schedWindow
}
type schedWindow struct {
allocated activeResources
todo []*workerRequest
}
type activeResources struct {
memUsedMin uint64
memUsedMax uint64
gpuUsed bool
cpuUse uint64
cond *sync.Cond
}
type workerRequest struct {
sector abi.SectorID
taskType sealtasks.TaskType
priority int // larger values more important
sel WorkerSelector
prepare WorkerAction
work WorkerAction
index int // The index of the item in the heap.
indexHeap int
ret chan<- workerResponse
ctx context.Context
}
type workerResponse struct {
err error
}
func newScheduler(spt abi.RegisteredSealProof) *scheduler {
return &scheduler{
spt: spt,
nextWorker: 0,
workers: map[WorkerID]*workerHandle{},
newWorkers: make(chan *workerHandle),
watchClosing: make(chan WorkerID),
workerClosing: make(chan WorkerID),
schedule: make(chan *workerRequest),
windowRequests: make(chan *schedWindowRequest),
schedQueue: &requestQueue{},
info: make(chan func(interface{})),
closing: make(chan struct{}),
closed: make(chan struct{}),
}
}
func (sh *scheduler) Schedule(ctx context.Context, sector abi.SectorID, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
ret := make(chan workerResponse)
select {
case sh.schedule <- &workerRequest{
sector: sector,
taskType: taskType,
priority: getPriority(ctx),
sel: sel,
prepare: prepare,
work: work,
ret: ret,
ctx: ctx,
}:
case <-sh.closing:
return xerrors.New("closing")
case <-ctx.Done():
return ctx.Err()
}
select {
case resp := <-ret:
return resp.err
case <-sh.closing:
return xerrors.New("closing")
case <-ctx.Done():
return ctx.Err()
}
}
func (r *workerRequest) respond(err error) {
select {
case r.ret <- workerResponse{err: err}:
case <-r.ctx.Done():
log.Warnf("request got cancelled before we could respond")
}
}
type SchedDiagRequestInfo struct {
Sector abi.SectorID
TaskType sealtasks.TaskType
Priority int
}
type SchedDiagInfo struct {
Requests []SchedDiagRequestInfo
OpenWindows []WorkerID
}
func (sh *scheduler) runSched() {
defer close(sh.closed)
go sh.runWorkerWatcher()
for {
select {
case w := <-sh.newWorkers:
sh.newWorker(w)
case wid := <-sh.workerClosing:
sh.dropWorker(wid)
case req := <-sh.schedule:
sh.schedQueue.Push(req)
sh.trySched()
if sh.testSync != nil {
sh.testSync <- struct{}{}
}
case req := <-sh.windowRequests:
sh.openWindows = append(sh.openWindows, req)
sh.trySched()
case ireq := <-sh.info:
ireq(sh.diag())
case <-sh.closing:
sh.schedClose()
return
}
}
}
func (sh *scheduler) diag() SchedDiagInfo {
var out SchedDiagInfo
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
task := (*sh.schedQueue)[sqi]
out.Requests = append(out.Requests, SchedDiagRequestInfo{
Sector: task.sector,
TaskType: task.taskType,
Priority: task.priority,
})
}
for _, window := range sh.openWindows {
out.OpenWindows = append(out.OpenWindows, window.worker)
}
return out
}
func (sh *scheduler) trySched() {
/*
This assigns tasks to workers based on:
- Task priority (achieved by handling sh.schedQueue in order, since it's already sorted by priority)
- Worker resource availability
- Task-specified worker preference (acceptableWindows array below sorted by this preference)
- Window request age
1. For each task in the schedQueue find windows which can handle them
1.1. Create list of windows capable of handling a task
1.2. Sort windows according to task selector preferences
2. Going through schedQueue again, assign task to first acceptable window
with resources available
3. Submit windows with scheduled tasks to workers
*/
windows := make([]schedWindow, len(sh.openWindows))
acceptableWindows := make([][]int, sh.schedQueue.Len())
log.Debugf("SCHED %d queued; %d open windows", sh.schedQueue.Len(), len(windows))
sh.workersLk.RLock()
defer sh.workersLk.RUnlock()
// Step 1
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
task := (*sh.schedQueue)[sqi]
needRes := ResourceTable[task.taskType][sh.spt]
task.indexHeap = sqi
for wnd, windowRequest := range sh.openWindows {
worker := sh.workers[windowRequest.worker]
// TODO: allow bigger windows
if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, worker.info.Resources) {
continue
}
rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
ok, err := task.sel.Ok(rpcCtx, task.taskType, sh.spt, worker)
cancel()
if err != nil {
log.Errorf("trySched(1) req.sel.Ok error: %+v", err)
continue
}
if !ok {
continue
}
acceptableWindows[sqi] = append(acceptableWindows[sqi], wnd)
}
if len(acceptableWindows[sqi]) == 0 {
continue
}
// Pick best worker (shuffle in case some workers are equally as good)
rand.Shuffle(len(acceptableWindows[sqi]), func(i, j int) {
acceptableWindows[sqi][i], acceptableWindows[sqi][j] = acceptableWindows[sqi][j], acceptableWindows[sqi][i]
})
sort.SliceStable(acceptableWindows[sqi], func(i, j int) bool {
wii := sh.openWindows[acceptableWindows[sqi][i]].worker
wji := sh.openWindows[acceptableWindows[sqi][j]].worker
if wii == wji {
// for the same worker prefer older windows
return acceptableWindows[sqi][i] < acceptableWindows[sqi][j]
}
wi := sh.workers[wii]
wj := sh.workers[wji]
rpcCtx, cancel := context.WithTimeout(task.ctx, SelectorTimeout)
defer cancel()
r, err := task.sel.Cmp(rpcCtx, task.taskType, wi, wj)
if err != nil {
log.Error("selecting best worker: %s", err)
}
return r
})
}
log.Debugf("SCHED windows: %+v", windows)
log.Debugf("SCHED Acceptable win: %+v", acceptableWindows)
// Step 2
scheduled := 0
for sqi := 0; sqi < sh.schedQueue.Len(); sqi++ {
task := (*sh.schedQueue)[sqi]
needRes := ResourceTable[task.taskType][sh.spt]
selectedWindow := -1
for _, wnd := range acceptableWindows[task.indexHeap] {
wid := sh.openWindows[wnd].worker
wr := sh.workers[wid].info.Resources
log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
// TODO: allow bigger windows
if !windows[wnd].allocated.canHandleRequest(needRes, wid, wr) {
continue
}
log.Debugf("SCHED ASSIGNED sqi:%d sector %d to window %d", sqi, task.sector.Number, wnd)
windows[wnd].allocated.add(wr, needRes)
selectedWindow = wnd
break
}
if selectedWindow < 0 {
// all windows full
continue
}
windows[selectedWindow].todo = append(windows[selectedWindow].todo, task)
sh.schedQueue.Remove(sqi)
sqi--
scheduled++
}
// Step 3
if scheduled == 0 {
return
}
scheduledWindows := map[int]struct{}{}
for wnd, window := range windows {
if len(window.todo) == 0 {
// Nothing scheduled here, keep the window open
continue
}
scheduledWindows[wnd] = struct{}{}
window := window // copy
select {
case sh.openWindows[wnd].done <- &window:
default:
log.Error("expected sh.openWindows[wnd].done to be buffered")
}
}
// Rewrite sh.openWindows array, removing scheduled windows
newOpenWindows := make([]*schedWindowRequest, 0, len(sh.openWindows)-len(scheduledWindows))
for wnd, window := range sh.openWindows {
if _, scheduled := scheduledWindows[wnd]; scheduled {
// keep unscheduled windows open
continue
}
newOpenWindows = append(newOpenWindows, window)
}
sh.openWindows = newOpenWindows
}
func (sh *scheduler) runWorker(wid WorkerID) {
var ready sync.WaitGroup
ready.Add(1)
defer ready.Wait()
go func() {
sh.workersLk.RLock()
worker, found := sh.workers[wid]
sh.workersLk.RUnlock()
ready.Done()
if !found {
panic(fmt.Sprintf("worker %d not found", wid))
}
defer close(worker.closedMgr)
scheduledWindows := make(chan *schedWindow, SchedWindows)
taskDone := make(chan struct{}, 1)
windowsRequested := 0
var activeWindows []*schedWindow
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
workerClosing, err := worker.w.Closing(ctx)
if err != nil {
return
}
defer func() {
log.Warnw("Worker closing", "workerid", wid)
// TODO: close / return all queued tasks
}()
for {
// ask for more windows if we need them
for ; windowsRequested < SchedWindows; windowsRequested++ {
select {
case sh.windowRequests <- &schedWindowRequest{
worker: wid,
done: scheduledWindows,
}:
case <-sh.closing:
return
case <-workerClosing:
return
case <-worker.closingMgr:
return
}
}
select {
case w := <-scheduledWindows:
activeWindows = append(activeWindows, w)
case <-taskDone:
log.Debugw("task done", "workerid", wid)
case <-sh.closing:
return
case <-workerClosing:
return
case <-worker.closingMgr:
return
}
assignLoop:
// process windows in order
for len(activeWindows) > 0 {
// process tasks within a window in order
for len(activeWindows[0].todo) > 0 {
todo := activeWindows[0].todo[0]
needRes := ResourceTable[todo.taskType][sh.spt]
sh.workersLk.RLock()
worker.lk.Lock()
ok := worker.preparing.canHandleRequest(needRes, wid, worker.info.Resources)
worker.lk.Unlock()
if !ok {
sh.workersLk.RUnlock()
break assignLoop
}
log.Debugf("assign worker sector %d", todo.sector.Number)
err := sh.assignWorker(taskDone, wid, worker, todo)
sh.workersLk.RUnlock()
if err != nil {
log.Error("assignWorker error: %+v", err)
go todo.respond(xerrors.Errorf("assignWorker error: %w", err))
}
activeWindows[0].todo = activeWindows[0].todo[1:]
}
copy(activeWindows, activeWindows[1:])
activeWindows[len(activeWindows)-1] = nil
activeWindows = activeWindows[:len(activeWindows)-1]
windowsRequested--
}
}
}()
}
func (sh *scheduler) assignWorker(taskDone chan struct{}, wid WorkerID, w *workerHandle, req *workerRequest) error {
needRes := ResourceTable[req.taskType][sh.spt]
w.lk.Lock()
w.preparing.add(w.info.Resources, needRes)
w.lk.Unlock()
go func() {
err := req.prepare(req.ctx, w.wt.worker(w.w))
sh.workersLk.Lock()
if err != nil {
w.lk.Lock()
w.preparing.free(w.info.Resources, needRes)
w.lk.Unlock()
sh.workersLk.Unlock()
select {
case taskDone <- struct{}{}:
case <-sh.closing:
log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
}
select {
case req.ret <- workerResponse{err: err}:
case <-req.ctx.Done():
log.Warnf("request got cancelled before we could respond (prepare error: %+v)", err)
case <-sh.closing:
log.Warnf("scheduler closed while sending response (prepare error: %+v)", err)
}
return
}
err = w.active.withResources(wid, w.info.Resources, needRes, &sh.workersLk, func() error {
w.lk.Lock()
w.preparing.free(w.info.Resources, needRes)
w.lk.Unlock()
sh.workersLk.Unlock()
defer sh.workersLk.Lock() // we MUST return locked from this function
select {
case taskDone <- struct{}{}:
case <-sh.closing:
}
err = req.work(req.ctx, w.wt.worker(w.w))
select {
case req.ret <- workerResponse{err: err}:
case <-req.ctx.Done():
log.Warnf("request got cancelled before we could respond")
case <-sh.closing:
log.Warnf("scheduler closed while sending response")
}
return nil
})
sh.workersLk.Unlock()
// This error should always be nil, since nothing is setting it, but just to be safe:
if err != nil {
log.Errorf("error executing worker (withResources): %+v", err)
}
}()
return nil
}
func (sh *scheduler) newWorker(w *workerHandle) {
w.closedMgr = make(chan struct{})
w.closingMgr = make(chan struct{})
sh.workersLk.Lock()
id := sh.nextWorker
sh.workers[id] = w
sh.nextWorker++
sh.workersLk.Unlock()
sh.runWorker(id)
select {
case sh.watchClosing <- id:
case <-sh.closing:
return
}
}
func (sh *scheduler) dropWorker(wid WorkerID) {
sh.workersLk.Lock()
defer sh.workersLk.Unlock()
w := sh.workers[wid]
sh.workerCleanup(wid, w)
delete(sh.workers, wid)
}
func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) {
if !w.cleanupStarted {
close(w.closingMgr)
}
select {
case <-w.closedMgr:
case <-time.After(time.Second):
log.Errorf("timeout closing worker manager goroutine %d", wid)
}
if !w.cleanupStarted {
w.cleanupStarted = true
newWindows := make([]*schedWindowRequest, 0, len(sh.openWindows))
for _, window := range sh.openWindows {
if window.worker != wid {
newWindows = append(newWindows, window)
}
}
sh.openWindows = newWindows
log.Debugf("dropWorker %d", wid)
go func() {
if err := w.w.Close(); err != nil {
log.Warnf("closing worker %d: %+v", err)
}
}()
}
}
func (sh *scheduler) schedClose() {
sh.workersLk.Lock()
defer sh.workersLk.Unlock()
log.Debugf("closing scheduler")
for i, w := range sh.workers {
sh.workerCleanup(i, w)
}
}
func (sh *scheduler) Info(ctx context.Context) (interface{}, error) {
ch := make(chan interface{}, 1)
sh.info <- func(res interface{}) {
ch <- res
}
select {
case res := <-ch:
return res, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (sh *scheduler) Close(ctx context.Context) error {
close(sh.closing)
select {
case <-sh.closed:
case <-ctx.Done():
return ctx.Err()
}
return nil
}

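From a caller's point of view the scheduler surface is Schedule plus the context priority knob. A minimal usage sketch, assuming a running scheduler; scheduleExample and the no-op actions are illustrative, and newTaskSelector is defined in selector_task.go below:
package sectorstorage
import (
	"context"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/sector-storage/sealtasks"
)
// scheduleExample queues a PreCommit1 task at elevated priority and
// blocks until the work action completes (or the scheduler closes).
func scheduleExample(ctx context.Context, sh *scheduler, sector abi.SectorID) error {
	ctx = WithPriority(ctx, 10) // larger values are scheduled first
	sel := newTaskSelector()
	prepare := func(ctx context.Context, w Worker) error { return nil } // e.g. fetch inputs
	work := func(ctx context.Context, w Worker) error { return nil }    // e.g. run the task
	return sh.Schedule(ctx, sector, sealtasks.TTPreCommit1, sel, prepare, work)
}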
110
extern/sector-storage/sched_resources.go vendored Normal file
View File

@ -0,0 +1,110 @@
package sectorstorage
import (
"sync"
"github.com/filecoin-project/sector-storage/storiface"
)
func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResources, r Resources, locker sync.Locker, cb func() error) error {
for !a.canHandleRequest(r, id, wr) {
if a.cond == nil {
a.cond = sync.NewCond(locker)
}
a.cond.Wait()
}
a.add(wr, r)
err := cb()
a.free(wr, r)
if a.cond != nil {
a.cond.Broadcast()
}
return err
}
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
a.gpuUsed = r.CanGPU
if r.MultiThread() {
a.cpuUse += wr.CPUs
} else {
a.cpuUse += uint64(r.Threads)
}
a.memUsedMin += r.MinMemory
a.memUsedMax += r.MaxMemory
}
func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
if r.CanGPU {
a.gpuUsed = false
}
if r.MultiThread() {
a.cpuUse -= wr.CPUs
} else {
a.cpuUse -= uint64(r.Threads)
}
a.memUsedMin -= r.MinMemory
a.memUsedMax -= r.MaxMemory
}
func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, res storiface.WorkerResources) bool {
// TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running)
minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory
if minNeedMem > res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d; not enough physical memory - need: %dM, have %dM", wid, minNeedMem/mib, res.MemPhysical/mib)
return false
}
maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory
if maxNeedMem > res.MemSwap+res.MemPhysical {
log.Debugf("sched: not scheduling on worker %d; not enough virtual memory - need: %dM, have %dM", wid, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib)
return false
}
if needRes.MultiThread() {
if a.cpuUse > 0 {
log.Debugf("sched: not scheduling on worker %d; multicore process needs %d threads, %d in use, target %d", wid, res.CPUs, a.cpuUse, res.CPUs)
return false
}
} else {
if a.cpuUse+uint64(needRes.Threads) > res.CPUs {
log.Debugf("sched: not scheduling on worker %d; not enough threads, need %d, %d in use, target %d", wid, needRes.Threads, a.cpuUse, res.CPUs)
return false
}
}
if len(res.GPUs) > 0 && needRes.CanGPU {
if a.gpuUsed {
log.Debugf("sched: not scheduling on worker %d; GPU in use", wid)
return false
}
}
return true
}
func (a *activeResources) utilization(wr storiface.WorkerResources) float64 {
var max float64
cpu := float64(a.cpuUse) / float64(wr.CPUs)
max = cpu
memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical)
if memMin > max {
max = memMin
}
memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap)
if memMax > max {
max = memMax
}
return max
}

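Worked example: with the test worker defined in sched_test.go below (128GiB physical, 2GiB reserved) and the 32GiB-sector PC1 row from resources.go (MinMemory 56GiB, BaseMinMemory 10MiB), two concurrent PC1s pass the minNeedMem check but a third does not. A small illustrative program reproducing the arithmetic:
package main
import "fmt"
func main() {
	const gib = uint64(1) << 30
	const mib = uint64(1) << 20
	memPhysical := 128 * gib  // test worker in sched_test.go below
	memReserved := 2 * gib    // ditto
	minMemory := 56 * gib     // PC1 on a 32GiB sector (resources.go)
	baseMinMemory := 10 * mib // ditto
	for running := uint64(0); running <= 2; running++ {
		memUsedMin := running * minMemory
		// Mirrors the minNeedMem check in canHandleRequest.
		need := memReserved + memUsedMin + minMemory + baseMinMemory
		fmt.Printf("%d running: need %d MiB of %d MiB -> fits=%v\n",
			running, need/mib, memPhysical/mib, need <= memPhysical)
	}
	// Two PC1s fit, a third does not, matching the "only run 2
	// parallel PC1s" comment in sched_test.go.
}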
453
extern/sector-storage/sched_test.go vendored Normal file
View File

@ -0,0 +1,453 @@
package sectorstorage
import (
"context"
"fmt"
"io"
"runtime"
"sync"
"testing"
"time"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/specs-storage/storage"
)
func TestWithPriority(t *testing.T) {
ctx := context.Background()
require.Equal(t, DefaultSchedPriority, getPriority(ctx))
ctx = WithPriority(ctx, 2222)
require.Equal(t, 2222, getPriority(ctx))
}
type schedTestWorker struct {
name string
taskTypes map[sealtasks.TaskType]struct{}
paths []stores.StoragePath
closed bool
closing chan struct{}
}
func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
panic("implement me")
}
func (s *schedTestWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
panic("implement me")
}
func (s *schedTestWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
panic("implement me")
}
func (s *schedTestWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
panic("implement me")
}
func (s *schedTestWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
panic("implement me")
}
func (s *schedTestWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
panic("implement me")
}
func (s *schedTestWorker) Remove(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (s *schedTestWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (s *schedTestWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
panic("implement me")
}
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (s *schedTestWorker) Fetch(ctx context.Context, id abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
panic("implement me")
}
func (s *schedTestWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
panic("implement me")
}
func (s *schedTestWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
panic("implement me")
}
func (s *schedTestWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
return s.taskTypes, nil
}
func (s *schedTestWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return s.paths, nil
}
func (s *schedTestWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
return storiface.WorkerInfo{
Hostname: s.name,
Resources: storiface.WorkerResources{
MemPhysical: 128 << 30,
MemSwap: 200 << 30,
MemReserved: 2 << 30,
CPUs: 32,
GPUs: []string{"a GPU"},
},
}, nil
}
func (s *schedTestWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
return s.closing, nil
}
func (s *schedTestWorker) Close() error {
if !s.closed {
log.Info("close schedTestWorker")
s.closed = true
close(s.closing)
}
return nil
}
var _ Worker = &schedTestWorker{}
func addTestWorker(t *testing.T, sched *scheduler, index *stores.Index, name string, taskTypes map[sealtasks.TaskType]struct{}) {
w := &schedTestWorker{
name: name,
taskTypes: taskTypes,
paths: []stores.StoragePath{{ID: "bb-8", Weight: 2, LocalPath: "<octopus>food</octopus>", CanSeal: true, CanStore: true}},
closing: make(chan struct{}),
}
for _, path := range w.paths {
err := index.StorageAttach(context.TODO(), stores.StorageInfo{
ID: path.ID,
URLs: nil,
Weight: path.Weight,
CanSeal: path.CanSeal,
CanStore: path.CanStore,
}, fsutil.FsStat{
Capacity: 1 << 40,
Available: 1 << 40,
Reserved: 3,
})
require.NoError(t, err)
}
info, err := w.Info(context.TODO())
require.NoError(t, err)
sched.newWorkers <- &workerHandle{
w: w,
wt: &workTracker{
running: map[uint64]storiface.WorkerJob{},
},
info: info,
preparing: &activeResources{},
active: &activeResources{},
}
}
func TestSchedStartStop(t *testing.T) {
spt := abi.RegisteredSealProof_StackedDrg32GiBV1
sched := newScheduler(spt)
go sched.runSched()
addTestWorker(t, sched, stores.NewIndex(), "fred", nil)
require.NoError(t, sched.Close(context.TODO()))
}
func TestSched(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
defer done()
spt := abi.RegisteredSealProof_StackedDrg32GiBV1
type workerSpec struct {
name string
taskTypes map[sealtasks.TaskType]struct{}
}
noopAction := func(ctx context.Context, w Worker) error {
return nil
}
type runMeta struct {
done map[string]chan struct{}
wg sync.WaitGroup
}
type task func(*testing.T, *scheduler, *stores.Index, *runMeta)
sched := func(taskName, expectWorker string, sid abi.SectorNumber, taskType sealtasks.TaskType) task {
_, _, l, _ := runtime.Caller(1)
_, _, l2, _ := runtime.Caller(2)
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
done := make(chan struct{})
rm.done[taskName] = done
sel := newAllocSelector(index, stores.FTCache, stores.PathSealing)
rm.wg.Add(1)
go func() {
defer rm.wg.Done()
sectorNum := abi.SectorID{
Miner: 8,
Number: sid,
}
err := sched.Schedule(ctx, sectorNum, taskType, sel, func(ctx context.Context, w Worker) error {
wi, err := w.Info(ctx)
require.NoError(t, err)
require.Equal(t, expectWorker, wi.Hostname)
log.Info("IN ", taskName)
for {
_, ok := <-done
if !ok {
break
}
}
log.Info("OUT ", taskName)
return nil
}, noopAction)
require.NoError(t, err, fmt.Sprint(l, l2))
}()
<-sched.testSync
}
}
taskStarted := func(name string) task {
_, _, l, _ := runtime.Caller(1)
_, _, l2, _ := runtime.Caller(2)
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
select {
case rm.done[name] <- struct{}{}:
case <-ctx.Done():
t.Fatal("ctx error", ctx.Err(), l, l2)
}
}
}
taskDone := func(name string) task {
_, _, l, _ := runtime.Caller(1)
_, _, l2, _ := runtime.Caller(2)
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
select {
case rm.done[name] <- struct{}{}:
case <-ctx.Done():
t.Fatal("ctx error", ctx.Err(), l, l2)
}
close(rm.done[name])
}
}
taskNotScheduled := func(name string) task {
_, _, l, _ := runtime.Caller(1)
_, _, l2, _ := runtime.Caller(2)
return func(t *testing.T, sched *scheduler, index *stores.Index, rm *runMeta) {
select {
case rm.done[name] <- struct{}{}:
t.Fatal("not expected", l, l2)
case <-time.After(10 * time.Millisecond): // TODO: better synchronization thingy
}
}
}
testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
return func(t *testing.T) {
index := stores.NewIndex()
sched := newScheduler(spt)
sched.testSync = make(chan struct{})
go sched.runSched()
for _, worker := range workers {
addTestWorker(t, sched, index, worker.name, worker.taskTypes)
}
rm := runMeta{
done: map[string]chan struct{}{},
}
for _, task := range tasks {
task(t, sched, index, &rm)
}
log.Info("wait for async stuff")
rm.wg.Wait()
require.NoError(t, sched.Close(context.TODO()))
}
}
multTask := func(tasks ...task) task {
return func(t *testing.T, s *scheduler, index *stores.Index, meta *runMeta) {
for _, tsk := range tasks {
tsk(t, s, index, meta)
}
}
}
t.Run("one-pc1", testFunc([]workerSpec{
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
}, []task{
sched("pc1-1", "fred", 8, sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-2workers-1", testFunc([]workerSpec{
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
}, []task{
sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-2workers-2", testFunc([]workerSpec{
{name: "fred1", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
{name: "fred2", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit2: {}}},
}, []task{
sched("pc1-1", "fred1", 8, sealtasks.TTPreCommit1),
taskDone("pc1-1"),
}))
t.Run("pc1-block-pc2", testFunc([]workerSpec{
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
}, []task{
sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
taskStarted("pc1"),
sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
taskNotScheduled("pc2"),
taskDone("pc1"),
taskDone("pc2"),
}))
t.Run("pc2-block-pc1", testFunc([]workerSpec{
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
}, []task{
sched("pc2", "fred", 8, sealtasks.TTPreCommit2),
taskStarted("pc2"),
sched("pc1", "fred", 8, sealtasks.TTPreCommit1),
taskNotScheduled("pc1"),
taskDone("pc2"),
taskDone("pc1"),
}))
t.Run("pc1-batching", testFunc([]workerSpec{
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}}},
}, []task{
sched("t1", "fred", 8, sealtasks.TTPreCommit1),
taskStarted("t1"),
sched("t2", "fred", 8, sealtasks.TTPreCommit1),
taskStarted("t2"),
// with worker settings, we can only run 2 parallel PC1s
// start 2 more to fill fetch buffer
sched("t3", "fred", 8, sealtasks.TTPreCommit1),
taskNotScheduled("t3"),
sched("t4", "fred", 8, sealtasks.TTPreCommit1),
taskNotScheduled("t4"),
taskDone("t1"),
taskDone("t2"),
taskStarted("t3"),
taskStarted("t4"),
taskDone("t3"),
taskDone("t4"),
}))
twoPC1 := func(prefix string, sid abi.SectorNumber, schedAssert func(name string) task) task {
return multTask(
sched(prefix+"-a", "fred", sid, sealtasks.TTPreCommit1),
schedAssert(prefix+"-a"),
sched(prefix+"-b", "fred", sid+1, sealtasks.TTPreCommit1),
schedAssert(prefix+"-b"),
)
}
twoPC1Act := func(prefix string, schedAssert func(name string) task) task {
return multTask(
schedAssert(prefix+"-a"),
schedAssert(prefix+"-b"),
)
}
// run this one a bunch of times, it had a very annoying tendency to fail randomly
for i := 0; i < 40; i++ {
t.Run("pc1-pc2-prio", testFunc([]workerSpec{
{name: "fred", taskTypes: map[sealtasks.TaskType]struct{}{sealtasks.TTPreCommit1: {}, sealtasks.TTPreCommit2: {}}},
}, []task{
// fill queues
twoPC1("w0", 0, taskStarted),
twoPC1("w1", 2, taskNotScheduled),
// windowed
sched("t1", "fred", 8, sealtasks.TTPreCommit1),
taskNotScheduled("t1"),
sched("t2", "fred", 9, sealtasks.TTPreCommit1),
taskNotScheduled("t2"),
sched("t3", "fred", 10, sealtasks.TTPreCommit2),
taskNotScheduled("t3"),
twoPC1Act("w0", taskDone),
twoPC1Act("w1", taskStarted),
twoPC1Act("w1", taskDone),
taskStarted("t3"),
taskNotScheduled("t1"),
taskNotScheduled("t2"),
taskDone("t3"),
taskStarted("t1"),
taskStarted("t2"),
taskDone("t1"),
taskDone("t2"),
}))
}
}

97
extern/sector-storage/sched_watch.go vendored Normal file
View File

@ -0,0 +1,97 @@
package sectorstorage
import (
"context"
"reflect"
)
func (sh *scheduler) runWorkerWatcher() {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
nilch := reflect.ValueOf(new(chan struct{})).Elem()
cases := []reflect.SelectCase{
{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(sh.closing),
},
{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(sh.watchClosing),
},
}
caseToWorker := map[int]WorkerID{}
for {
n, rv, ok := reflect.Select(cases)
switch {
case n == 0: // sh.closing
return
case n == 1: // sh.watchClosing
if !ok {
log.Errorf("watchClosing channel closed")
return
}
wid, ok := rv.Interface().(WorkerID)
if !ok {
panic("got a non-WorkerID message")
}
sh.workersLk.Lock()
workerClosing, err := sh.workers[wid].w.Closing(ctx)
sh.workersLk.Unlock()
if err != nil {
log.Errorf("getting worker closing channel: %+v", err)
select {
case sh.workerClosing <- wid:
case <-sh.closing:
return
}
continue
}
toSet := -1
for i, sc := range cases {
if sc.Chan == nilch {
toSet = i
break
}
}
if toSet == -1 {
toSet = len(cases)
cases = append(cases, reflect.SelectCase{})
}
cases[toSet] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(workerClosing),
}
caseToWorker[toSet] = wid
default:
wid, found := caseToWorker[n]
if !found {
log.Errorf("worker ID not found for case %d", n)
continue
}
delete(caseToWorker, n)
cases[n] = reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: nilch,
}
log.Warnf("worker %d dropped", wid)
select {
case sh.workerClosing <- wid:
case <-sh.closing:
return
}
}
}
}

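runWorkerWatcher uses reflect.Select to wait on a dynamically growing set of per-worker Closing channels, parking freed slots on a nil channel (which never fires) instead of shrinking the case slice. A minimal standalone sketch of that pattern, independent of the scheduler types:
package main
import (
	"fmt"
	"reflect"
	"time"
)
func main() {
	a := make(chan struct{})
	b := make(chan struct{})
	// A receive on a nil channel blocks forever, so a nil-channel case
	// is a safe "parked" slot in the select set.
	nilch := reflect.ValueOf(new(chan struct{})).Elem()
	cases := []reflect.SelectCase{
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(a)},
		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(b)},
	}
	go func() { time.Sleep(10 * time.Millisecond); close(a) }()
	go func() { time.Sleep(20 * time.Millisecond); close(b) }()
	for fired := 0; fired < 2; fired++ {
		n, _, _ := reflect.Select(cases)
		fmt.Println("case fired:", n)
		cases[n].Chan = nilch // park the slot instead of shrinking the slice
	}
}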
57
extern/sector-storage/sealtasks/task.go vendored Normal file
View File

@ -0,0 +1,57 @@
package sealtasks
type TaskType string
const (
TTAddPiece TaskType = "seal/v0/addpiece"
TTPreCommit1 TaskType = "seal/v0/precommit/1"
TTPreCommit2 TaskType = "seal/v0/precommit/2"
TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers!
TTCommit2 TaskType = "seal/v0/commit/2"
TTFinalize TaskType = "seal/v0/finalize"
TTFetch TaskType = "seal/v0/fetch"
TTUnseal TaskType = "seal/v0/unseal"
TTReadUnsealed TaskType = "seal/v0/unsealread"
)
var order = map[TaskType]int{
TTAddPiece: 7,
TTPreCommit1: 6,
TTPreCommit2: 5,
TTCommit2: 4,
TTCommit1: 3,
TTFetch: 2,
TTFinalize: 1,
TTUnseal: 0,
TTReadUnsealed: 0,
}
var shortNames = map[TaskType]string{
TTAddPiece: "AP ",
TTPreCommit1: "PC1",
TTPreCommit2: "PC2",
TTCommit1: "C1 ",
TTCommit2: "C2 ",
TTFinalize: "FIN",
TTFetch: "GET",
TTUnseal: "UNS",
TTReadUnsealed: "RD ",
}
func (a TaskType) Less(b TaskType) bool {
return order[a] < order[b]
}
func (a TaskType) Short() string {
n, ok := shortNames[a]
if !ok {
return "UNK"
}
return n
}

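Note the ordering convention: Less sorts by ascending order value, and requestQueue pops from the front, so lower numbers run first. Later-pipeline work (unseal, finalize, fetch, commit) therefore takes precedence over PreCommit and AddPiece. A small illustrative sort:
package main
import (
	"fmt"
	"sort"
	"github.com/filecoin-project/sector-storage/sealtasks"
)
func main() {
	batch := []sealtasks.TaskType{
		sealtasks.TTAddPiece,
		sealtasks.TTFetch,
		sealtasks.TTPreCommit2,
		sealtasks.TTCommit1,
	}
	// Highest scheduling precedence first, matching requestQueue.Less.
	sort.Slice(batch, func(i, j int) bool { return batch[i].Less(batch[j]) })
	for _, t := range batch {
		fmt.Printf("%s %s\n", t.Short(), t)
	}
	// Prints GET, C1, PC2, AP: fetch before commit1 before precommit2
	// before addpiece.
}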
65
extern/sector-storage/selector_alloc.go vendored Normal file
View File

@ -0,0 +1,65 @@
package sectorstorage
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
)
type allocSelector struct {
index stores.SectorIndex
alloc stores.SectorFileType
ptype stores.PathType
}
func newAllocSelector(index stores.SectorIndex, alloc stores.SectorFileType, ptype stores.PathType) *allocSelector {
return &allocSelector{
index: index,
alloc: alloc,
ptype: ptype,
}
}
func (s *allocSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
tasks, err := whnd.w.TaskTypes(ctx)
if err != nil {
return false, xerrors.Errorf("getting supported worker task types: %w", err)
}
if _, supported := tasks[task]; !supported {
return false, nil
}
paths, err := whnd.w.Paths(ctx)
if err != nil {
return false, xerrors.Errorf("getting worker paths: %w", err)
}
have := map[stores.ID]struct{}{}
for _, path := range paths {
have[path.ID] = struct{}{}
}
best, err := s.index.StorageBestAlloc(ctx, s.alloc, spt, s.ptype)
if err != nil {
return false, xerrors.Errorf("finding best alloc storage: %w", err)
}
for _, info := range best {
if _, ok := have[info.ID]; ok {
return true, nil
}
}
return false, nil
}
func (s *allocSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
}
var _ WorkerSelector = &allocSelector{}

67
extern/sector-storage/selector_existing.go vendored Normal file
View File

@ -0,0 +1,67 @@
package sectorstorage
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
)
type existingSelector struct {
index stores.SectorIndex
sector abi.SectorID
alloc stores.SectorFileType
allowFetch bool
}
func newExistingSelector(index stores.SectorIndex, sector abi.SectorID, alloc stores.SectorFileType, allowFetch bool) *existingSelector {
return &existingSelector{
index: index,
sector: sector,
alloc: alloc,
allowFetch: allowFetch,
}
}
func (s *existingSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
tasks, err := whnd.w.TaskTypes(ctx)
if err != nil {
return false, xerrors.Errorf("getting supported worker task types: %w", err)
}
if _, supported := tasks[task]; !supported {
return false, nil
}
paths, err := whnd.w.Paths(ctx)
if err != nil {
return false, xerrors.Errorf("getting worker paths: %w", err)
}
have := map[stores.ID]struct{}{}
for _, path := range paths {
have[path.ID] = struct{}{}
}
best, err := s.index.StorageFindSector(ctx, s.sector, s.alloc, spt, s.allowFetch)
if err != nil {
return false, xerrors.Errorf("finding best storage: %w", err)
}
for _, info := range best {
if _, ok := have[info.ID]; ok {
return true, nil
}
}
return false, nil
}
func (s *existingSelector) Cmp(ctx context.Context, task sealtasks.TaskType, a, b *workerHandle) (bool, error) {
return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
}
var _ WorkerSelector = &existingSelector{}

48
extern/sector-storage/selector_task.go vendored Normal file
View File

@ -0,0 +1,48 @@
package sectorstorage
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
)
type taskSelector struct {
best []stores.StorageInfo //nolint: unused, structcheck
}
func newTaskSelector() *taskSelector {
return &taskSelector{}
}
func (s *taskSelector) Ok(ctx context.Context, task sealtasks.TaskType, spt abi.RegisteredSealProof, whnd *workerHandle) (bool, error) {
tasks, err := whnd.w.TaskTypes(ctx)
if err != nil {
return false, xerrors.Errorf("getting supported worker task types: %w", err)
}
_, supported := tasks[task]
return supported, nil
}
func (s *taskSelector) Cmp(ctx context.Context, _ sealtasks.TaskType, a, b *workerHandle) (bool, error) {
atasks, err := a.w.TaskTypes(ctx)
if err != nil {
return false, xerrors.Errorf("getting supported worker task types: %w", err)
}
btasks, err := b.w.TaskTypes(ctx)
if err != nil {
return false, xerrors.Errorf("getting supported worker task types: %w", err)
}
if len(atasks) != len(btasks) {
return len(atasks) < len(btasks), nil // prefer workers which can do less
}
return a.active.utilization(a.info.Resources) < b.active.utilization(b.info.Resources), nil
}
var _ WorkerSelector = &allocSelector{}

35
extern/sector-storage/stats.go vendored Normal file
View File

@ -0,0 +1,35 @@
package sectorstorage
import "github.com/filecoin-project/sector-storage/storiface"
func (m *Manager) WorkerStats() map[uint64]storiface.WorkerStats {
m.sched.workersLk.Lock()
defer m.sched.workersLk.Unlock()
out := map[uint64]storiface.WorkerStats{}
for id, handle := range m.sched.workers {
out[uint64(id)] = storiface.WorkerStats{
Info: handle.info,
MemUsedMin: handle.active.memUsedMin,
MemUsedMax: handle.active.memUsedMax,
GpuUsed: handle.active.gpuUsed,
CpuUse: handle.active.cpuUse,
}
}
return out
}
func (m *Manager) WorkerJobs() map[uint64][]storiface.WorkerJob {
m.sched.workersLk.Lock()
defer m.sched.workersLk.Unlock()
out := map[uint64][]storiface.WorkerJob{}
for id, handle := range m.sched.workers {
out[uint64(id)] = handle.wt.Running()
}
return out
}

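A minimal sketch of consuming the stats API, e.g. from a status command; logWorkerStats is a hypothetical helper, not part of the diff:
package sectorstorage
// logWorkerStats dumps per-worker resource usage via WorkerStats.
func logWorkerStats(m *Manager) {
	for id, st := range m.WorkerStats() {
		log.Infof("worker %d (%s): cpu=%d gpu=%v mem=[%d, %d]",
			id, st.Info.Hostname, st.CpuUse, st.GpuUsed, st.MemUsedMin, st.MemUsedMax)
	}
}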
140
extern/sector-storage/stores/filetype.go vendored Normal file
View File

@ -0,0 +1,140 @@
package stores
import (
"fmt"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
)
const (
FTUnsealed SectorFileType = 1 << iota
FTSealed
FTCache
FileTypes = iota
)
const (
FTNone SectorFileType = 0
)
const FSOverheadDen = 10
var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTCache: 141, // 11 layers + D(2x ssize) + C + R
}
var FsOverheadFinalized = map[SectorFileType]int{
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTCache: 2,
}
type SectorFileType int
func (t SectorFileType) String() string {
switch t {
case FTUnsealed:
return "unsealed"
case FTSealed:
return "sealed"
case FTCache:
return "cache"
default:
return fmt.Sprintf("<unknown %d>", t)
}
}
func (t SectorFileType) Has(singleType SectorFileType) bool {
return t&singleType == singleType
}
func (t SectorFileType) SealSpaceUse(spt abi.RegisteredSealProof) (uint64, error) {
ssize, err := spt.SectorSize()
if err != nil {
return 0, xerrors.Errorf("getting sector size: %w", err)
}
var need uint64
for _, pathType := range PathTypes {
if !t.Has(pathType) {
continue
}
oh, ok := FSOverheadSeal[pathType]
if !ok {
return 0, xerrors.Errorf("no seal overhead info for %s", pathType)
}
need += uint64(oh) * uint64(ssize) / FSOverheadDen
}
return need, nil
}
func (t SectorFileType) All() [FileTypes]bool {
var out [FileTypes]bool
for i := range out {
out[i] = t&(1<<i) > 0
}
return out
}
type SectorPaths struct {
Id abi.SectorID
Unsealed string
Sealed string
Cache string
}
func ParseSectorID(baseName string) (abi.SectorID, error) {
var n abi.SectorNumber
var mid abi.ActorID
read, err := fmt.Sscanf(baseName, "s-t0%d-%d", &mid, &n)
if err != nil {
return abi.SectorID{}, xerrors.Errorf("sscanf sector name ('%s'): %w", baseName, err)
}
if read != 2 {
return abi.SectorID{}, xerrors.Errorf("parseSectorID expected to scan 2 values, got %d", read)
}
return abi.SectorID{
Miner: mid,
Number: n,
}, nil
}
func SectorName(sid abi.SectorID) string {
return fmt.Sprintf("s-t0%d-%d", sid.Miner, sid.Number)
}
func PathByType(sps SectorPaths, fileType SectorFileType) string {
switch fileType {
case FTUnsealed:
return sps.Unsealed
case FTSealed:
return sps.Sealed
case FTCache:
return sps.Cache
}
panic("requested unknown path type")
}
func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
switch fileType {
case FTUnsealed:
sps.Unsealed = p
case FTSealed:
sps.Sealed = p
case FTCache:
sps.Cache = p
}
}

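SectorName and ParseSectorID are exact inverses, and SectorFileType values combine as bitmasks, which SealSpaceUse sums per set bit. A minimal round-trip sketch (standalone demo, not part of the diff):
package main
import (
	"fmt"
	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/sector-storage/stores"
)
func main() {
	id := abi.SectorID{Miner: 1000, Number: 42}
	name := stores.SectorName(id) // "s-t01000-42"
	back, err := stores.ParseSectorID(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, back == id) // round-trips exactly
	// Combined types are bitmasks: sealed+cache needs both overheads.
	ft := stores.FTSealed | stores.FTCache
	need, err := ft.SealSpaceUse(abi.RegisteredSealProof_StackedDrg32GiBV1)
	if err != nil {
		panic(err)
	}
	fmt.Printf("seal space for sealed+cache: %d bytes\n", need)
}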
155
extern/sector-storage/stores/http_handler.go vendored Normal file
View File

@ -0,0 +1,155 @@
package stores
import (
"encoding/json"
"io"
"net/http"
"os"
"github.com/gorilla/mux"
logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/sector-storage/tarutil"
)
var log = logging.Logger("stores")
type FetchHandler struct {
*Local
}
func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/
mux := mux.NewRouter()
mux.HandleFunc("/remote/stat/{id}", handler.remoteStatFs).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}", handler.remoteGetSector).Methods("GET")
mux.HandleFunc("/remote/{type}/{id}", handler.remoteDeleteSector).Methods("DELETE")
mux.ServeHTTP(w, r)
}
func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id := ID(vars["id"])
st, err := handler.Local.FsStat(r.Context(), id)
switch err {
case errPathNotFound:
w.WriteHeader(404)
return
case nil:
break
default:
w.WriteHeader(500)
log.Errorf("%+v", err)
return
}
if err := json.NewEncoder(w).Encode(&st); err != nil {
log.Warnf("error writing stat response: %+v", err)
}
}
func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {
log.Infof("SERVE GET %s", r.URL)
vars := mux.Vars(r)
id, err := ParseSectorID(vars["id"])
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
ft, err := ftFromString(vars["type"])
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
// The caller has a lock on this sector already, no need to get one here
// passing 0 spt because we don't allocate anything
paths, _, err := handler.Local.AcquireSector(r.Context(), id, 0, ft, FTNone, PathStorage, AcquireMove)
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
// TODO: reserve local storage here
path := PathByType(paths, ft)
if path == "" {
log.Error("acquired path was empty")
w.WriteHeader(500)
return
}
stat, err := os.Stat(path)
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
var rd io.Reader
if stat.IsDir() {
rd, err = tarutil.TarDirectory(path)
w.Header().Set("Content-Type", "application/x-tar")
} else {
rd, err = os.OpenFile(path, os.O_RDONLY, 0644)
w.Header().Set("Content-Type", "application/octet-stream")
}
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
w.WriteHeader(200)
if _, err := io.Copy(w, rd); err != nil { // TODO: default 32k buf may be too small
log.Error("%+v", err)
return
}
}
func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.Request) {
log.Infof("SERVE DELETE %s", r.URL)
vars := mux.Vars(r)
id, err := ParseSectorID(vars["id"])
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
ft, err := ftFromString(vars["type"])
if err != nil {
log.Error("%+v", err)
w.WriteHeader(500)
return
}
if err := handler.Remove(r.Context(), id, ft, false); err != nil {
log.Errorf("%+v", err)
w.WriteHeader(500)
return
}
}
func ftFromString(t string) (SectorFileType, error) {
switch t {
case FTUnsealed.String():
return FTUnsealed, nil
case FTSealed.String():
return FTSealed, nil
case FTCache.String():
return FTCache, nil
default:
return 0, xerrors.Errorf("unknown sector file type: '%s'", t)
}
}
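A sketch of serving the handler above (the listen address is an assumption, not part of this diff; in lotus the handler is mounted on the miner/worker API server):
// serveFetch exposes /remote/stat/{id} and /remote/{type}/{id} over HTTP.
func serveFetch(local *Local) error {
	return http.ListenAndServe("127.0.0.1:3456", &FetchHandler{Local: local})
}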

442
extern/sector-storage/stores/index.go vendored Normal file
View File

@ -0,0 +1,442 @@
package stores
import (
"context"
"github.com/filecoin-project/sector-storage/fsutil"
"net/url"
gopath "path"
"sort"
"sync"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
)
var HeartbeatInterval = 10 * time.Second
var SkippedHeartbeatThresh = HeartbeatInterval * 5
// ID identifies sector storage by UUID. One sector storage should map to one
// filesystem, local or networked / shared by multiple machines
type ID string
type StorageInfo struct {
ID ID
URLs []string // TODO: Support non-http transports
Weight uint64
CanSeal bool
CanStore bool
}
type HealthReport struct {
Stat fsutil.FsStat
Err error
}
type SectorStorageInfo struct {
ID ID
URLs []string // TODO: Support non-http transports
Weight uint64
CanSeal bool
CanStore bool
Primary bool
}
type SectorIndex interface { // part of storage-miner api
StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error
StorageInfo(context.Context, ID) (StorageInfo, error)
StorageReportHealth(context.Context, ID, HealthReport) error
StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType, primary bool) error
StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error
StorageFindSector(ctx context.Context, sector abi.SectorID, ft SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]SectorStorageInfo, error)
StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error)
// atomically acquire locks on all sector file types. close ctx to unlock
StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error
StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error)
}
type Decl struct {
abi.SectorID
SectorFileType
}
type declMeta struct {
storage ID
primary bool
}
type storageEntry struct {
info *StorageInfo
fsi fsutil.FsStat
lastHeartbeat time.Time
heartbeatErr error
}
type Index struct {
*indexLocks
lk sync.RWMutex
sectors map[Decl][]*declMeta
stores map[ID]*storageEntry
}
func NewIndex() *Index {
return &Index{
indexLocks: &indexLocks{
locks: map[abi.SectorID]*sectorLock{},
},
sectors: map[Decl][]*declMeta{},
stores: map[ID]*storageEntry{},
}
}
func (i *Index) StorageList(ctx context.Context) (map[ID][]Decl, error) {
i.lk.RLock()
defer i.lk.RUnlock()
byID := map[ID]map[abi.SectorID]SectorFileType{}
for id := range i.stores {
byID[id] = map[abi.SectorID]SectorFileType{}
}
for decl, ids := range i.sectors {
for _, id := range ids {
byID[id.storage][decl.SectorID] |= decl.SectorFileType
}
}
out := map[ID][]Decl{}
for id, m := range byID {
out[id] = []Decl{}
for sectorID, fileType := range m {
out[id] = append(out[id], Decl{
SectorID: sectorID,
SectorFileType: fileType,
})
}
}
return out, nil
}
func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsStat) error {
i.lk.Lock()
defer i.lk.Unlock()
log.Infof("New sector storage: %s", si.ID)
if _, ok := i.stores[si.ID]; ok {
for _, u := range si.URLs {
if _, err := url.Parse(u); err != nil {
return xerrors.Errorf("failed to parse url %s: %w", si.URLs, err)
}
}
uloop:
for _, u := range si.URLs {
for _, l := range i.stores[si.ID].info.URLs {
if u == l {
continue uloop
}
}
i.stores[si.ID].info.URLs = append(i.stores[si.ID].info.URLs, u)
}
return nil
}
i.stores[si.ID] = &storageEntry{
info: &si,
fsi: st,
lastHeartbeat: time.Now(),
}
return nil
}
func (i *Index) StorageReportHealth(ctx context.Context, id ID, report HealthReport) error {
i.lk.Lock()
defer i.lk.Unlock()
ent, ok := i.stores[id]
if !ok {
return xerrors.Errorf("health report for unknown storage: %s", id)
}
ent.fsi = report.Stat
ent.heartbeatErr = report.Err
ent.lastHeartbeat = time.Now()
return nil
}
func (i *Index) StorageDeclareSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType, primary bool) error {
i.lk.Lock()
defer i.lk.Unlock()
loop:
for _, fileType := range PathTypes {
if fileType&ft == 0 {
continue
}
d := Decl{s, fileType}
for _, sid := range i.sectors[d] {
if sid.storage == storageId {
if !sid.primary && primary {
sid.primary = true
} else {
log.Warnf("sector %v redeclared in %s", s, storageId)
}
continue loop
}
}
i.sectors[d] = append(i.sectors[d], &declMeta{
storage: storageId,
primary: primary,
})
}
return nil
}
func (i *Index) StorageDropSector(ctx context.Context, storageId ID, s abi.SectorID, ft SectorFileType) error {
i.lk.Lock()
defer i.lk.Unlock()
for _, fileType := range PathTypes {
if fileType&ft == 0 {
continue
}
d := Decl{s, fileType}
if len(i.sectors[d]) == 0 {
return nil
}
rewritten := make([]*declMeta, 0, len(i.sectors[d])-1)
for _, sid := range i.sectors[d] {
if sid.storage == storageId {
continue
}
rewritten = append(rewritten, sid)
}
if len(rewritten) == 0 {
delete(i.sectors, d)
return nil
}
i.sectors[d] = rewritten
}
return nil
}
func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft SectorFileType, spt abi.RegisteredSealProof, allowFetch bool) ([]SectorStorageInfo, error) {
i.lk.RLock()
defer i.lk.RUnlock()
storageIDs := map[ID]uint64{}
isprimary := map[ID]bool{}
for _, pathType := range PathTypes {
if ft&pathType == 0 {
continue
}
for _, id := range i.sectors[Decl{s, pathType}] {
storageIDs[id.storage]++
isprimary[id.storage] = isprimary[id.storage] || id.primary
}
}
out := make([]SectorStorageInfo, 0, len(storageIDs))
for id, n := range storageIDs {
st, ok := i.stores[id]
if !ok {
log.Warnf("storage %s is not present in sector index (referenced by sector %v)", id, s)
continue
}
urls := make([]string, len(st.info.URLs))
for k, u := range st.info.URLs {
rl, err := url.Parse(u)
if err != nil {
return nil, xerrors.Errorf("failed to parse url: %w", err)
}
rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
urls[k] = rl.String()
}
out = append(out, SectorStorageInfo{
ID: id,
URLs: urls,
Weight: st.info.Weight * n, // storage with more sector types is better
CanSeal: st.info.CanSeal,
CanStore: st.info.CanStore,
Primary: isprimary[id],
})
}
if allowFetch {
spaceReq, err := ft.SealSpaceUse(spt)
if err != nil {
return nil, xerrors.Errorf("estimating required space: %w", err)
}
for id, st := range i.stores {
if !st.info.CanSeal {
continue
}
if spaceReq > uint64(st.fsi.Available) {
log.Debugf("not selecting on %s, out of space (available: %d, need: %d)", st.info.ID, st.fsi.Available, spaceReq)
continue
}
if time.Since(st.lastHeartbeat) > SkippedHeartbeatThresh {
log.Debugf("not selecting on %s, didn't receive heartbeats for %s", st.info.ID, time.Since(st.lastHeartbeat))
continue
}
if st.heartbeatErr != nil {
log.Debugf("not selecting on %s, heartbeat error: %s", st.info.ID, st.heartbeatErr)
continue
}
if _, ok := storageIDs[id]; ok {
continue
}
urls := make([]string, len(st.info.URLs))
for k, u := range st.info.URLs {
rl, err := url.Parse(u)
if err != nil {
return nil, xerrors.Errorf("failed to parse url: %w", err)
}
rl.Path = gopath.Join(rl.Path, ft.String(), SectorName(s))
urls[k] = rl.String()
}
out = append(out, SectorStorageInfo{
ID: id,
URLs: urls,
Weight: st.info.Weight * 0, // TODO: something better than just '0'
CanSeal: st.info.CanSeal,
CanStore: st.info.CanStore,
Primary: false,
})
}
}
return out, nil
}
func (i *Index) StorageInfo(ctx context.Context, id ID) (StorageInfo, error) {
i.lk.RLock()
defer i.lk.RUnlock()
si, found := i.stores[id]
if !found {
return StorageInfo{}, xerrors.Errorf("sector store %s not found", id)
}
return *si.info, nil
}
func (i *Index) StorageBestAlloc(ctx context.Context, allocate SectorFileType, spt abi.RegisteredSealProof, pathType PathType) ([]StorageInfo, error) {
i.lk.RLock()
defer i.lk.RUnlock()
var candidates []storageEntry
spaceReq, err := allocate.SealSpaceUse(spt)
if err != nil {
return nil, xerrors.Errorf("estimating required space: %w", err)
}
for _, p := range i.stores {
if (pathType == PathSealing) && !p.info.CanSeal {
continue
}
if (pathType == PathStorage) && !p.info.CanStore {
continue
}
if spaceReq > uint64(p.fsi.Available) {
log.Debugf("not allocating on %s, out of space (available: %d, need: %d)", p.info.ID, p.fsi.Available, spaceReq)
continue
}
if time.Since(p.lastHeartbeat) > SkippedHeartbeatThresh {
log.Debugf("not allocating on %s, didn't receive heartbeats for %s", p.info.ID, time.Since(p.lastHeartbeat))
continue
}
if p.heartbeatErr != nil {
log.Debugf("not allocating on %s, heartbeat error: %s", p.info.ID, p.heartbeatErr)
continue
}
candidates = append(candidates, *p)
}
if len(candidates) == 0 {
return nil, xerrors.New("no good path found")
}
sort.Slice(candidates, func(i, j int) bool {
iw := big.Mul(big.NewInt(int64(candidates[i].fsi.Available)), big.NewInt(int64(candidates[i].info.Weight)))
jw := big.Mul(big.NewInt(int64(candidates[j].fsi.Available)), big.NewInt(int64(candidates[j].info.Weight)))
return iw.GreaterThan(jw)
})
out := make([]StorageInfo, len(candidates))
for i, candidate := range candidates {
out[i] = *candidate.info
}
return out, nil
}
func (i *Index) FindSector(id abi.SectorID, typ SectorFileType) ([]ID, error) {
i.lk.RLock()
defer i.lk.RUnlock()
f, ok := i.sectors[Decl{
SectorID: id,
SectorFileType: typ,
}]
if !ok {
return nil, nil
}
out := make([]ID, 0, len(f))
for _, meta := range f {
out = append(out, meta.storage)
}
return out, nil
}
var _ SectorIndex = &Index{}
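A short usage sketch of the index (the ID, URL, and sizes are illustrative):
func exampleIndex(ctx context.Context) error {
	idx := NewIndex()
	id := ID("1c0d19bb-0000-4000-8000-000000000000") // illustrative UUID
	err := idx.StorageAttach(ctx, StorageInfo{
		ID:       id,
		URLs:     []string{"http://127.0.0.1:3456/remote"},
		Weight:   10,
		CanSeal:  true,
		CanStore: true,
	}, fsutil.FsStat{Capacity: 1 << 40, Available: 1 << 40})
	if err != nil {
		return err
	}
	sid := abi.SectorID{Miner: 1000, Number: 1}
	if err := idx.StorageDeclareSector(ctx, id, sid, FTSealed, true); err != nil {
		return err
	}
	// allowFetch=false asks only for existing copies, so spt may be 0
	infos, err := idx.StorageFindSector(ctx, sid, FTSealed, 0, false)
	if err != nil {
		return err
	}
	log.Infof("sealed copy found in %d store(s)", len(infos))
	return nil
}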

View File

@ -0,0 +1,154 @@
package stores
import (
"context"
"sync"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type sectorLock struct {
cond *ctxCond
r [FileTypes]uint
w SectorFileType
refs uint // access with indexLocks.lk
}
func (l *sectorLock) canLock(read SectorFileType, write SectorFileType) bool {
for i, b := range write.All() {
if b && l.r[i] > 0 {
return false
}
}
// check that there are no locks taken for either read or write file types we want
return l.w&read == 0 && l.w&write == 0
}
func (l *sectorLock) tryLock(read SectorFileType, write SectorFileType) bool {
if !l.canLock(read, write) {
return false
}
for i, set := range read.All() {
if set {
l.r[i]++
}
}
l.w |= write
return true
}
type lockFn func(l *sectorLock, ctx context.Context, read SectorFileType, write SectorFileType) (bool, error)
func (l *sectorLock) tryLockSafe(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
return l.tryLock(read, write), nil
}
func (l *sectorLock) lock(ctx context.Context, read SectorFileType, write SectorFileType) (bool, error) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
for !l.tryLock(read, write) {
if err := l.cond.Wait(ctx); err != nil {
return false, err
}
}
return true, nil
}
func (l *sectorLock) unlock(read SectorFileType, write SectorFileType) {
l.cond.L.Lock()
defer l.cond.L.Unlock()
for i, set := range read.All() {
if set {
l.r[i]--
}
}
l.w &= ^write
l.cond.Broadcast()
}
type indexLocks struct {
lk sync.Mutex
locks map[abi.SectorID]*sectorLock
}
func (i *indexLocks) lockWith(ctx context.Context, lockFn lockFn, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) {
if read|write == 0 {
return false, nil
}
if read|write > (1<<FileTypes)-1 {
return false, xerrors.Errorf("unknown file types specified")
}
i.lk.Lock()
slk, ok := i.locks[sector]
if !ok {
slk = &sectorLock{}
slk.cond = newCtxCond(&sync.Mutex{})
i.locks[sector] = slk
}
slk.refs++
i.lk.Unlock()
locked, err := lockFn(slk, ctx, read, write)
if err != nil {
return false, err
}
if !locked {
return false, nil
}
go func() {
// TODO: we can avoid this goroutine with a bit of creativity and reflect
<-ctx.Done()
i.lk.Lock()
slk.unlock(read, write)
slk.refs--
if slk.refs == 0 {
delete(i.locks, sector)
}
i.lk.Unlock()
}()
return true, nil
}
func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) error {
ok, err := i.lockWith(ctx, (*sectorLock).lock, sector, read, write)
if err != nil {
return err
}
if !ok {
return xerrors.Errorf("failed to acquire lock")
}
return nil
}
func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read SectorFileType, write SectorFileType) (bool, error) {
return i.lockWith(ctx, (*sectorLock).tryLockSafe, sector, read, write)
}
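Lock lifetime is tied to the caller's context (see the goroutine above); a sketch with an illustrative sector:
func exampleLock(ilk *indexLocks) error {
	ctx, cancel := context.WithCancel(context.Background())
	// read lock on unsealed, write lock on sealed+cache
	if err := ilk.StorageLock(ctx, abi.SectorID{Miner: 1000, Number: 1}, FTUnsealed, FTSealed|FTCache); err != nil {
		return err
	}
	// ... work on the sector files ...
	cancel() // cancelling the context is what releases the locks
	return nil
}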

View File

@ -0,0 +1,170 @@
package stores
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/specs-actors/actors/abi"
)
var aSector = abi.SectorID{
Miner: 2,
Number: 9000,
}
func TestCanLock(t *testing.T) {
lk := sectorLock{
r: [FileTypes]uint{},
w: FTNone,
}
require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
require.Equal(t, true, lk.canLock(FTNone, FTUnsealed))
ftAll := FTUnsealed | FTSealed | FTCache
require.Equal(t, true, lk.canLock(ftAll, FTNone))
require.Equal(t, true, lk.canLock(FTNone, ftAll))
lk.r[0] = 1 // unsealed read taken
require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
require.Equal(t, false, lk.canLock(FTNone, FTUnsealed))
require.Equal(t, true, lk.canLock(ftAll, FTNone))
require.Equal(t, false, lk.canLock(FTNone, ftAll))
require.Equal(t, true, lk.canLock(FTNone, FTSealed|FTCache))
require.Equal(t, true, lk.canLock(FTUnsealed, FTSealed|FTCache))
lk.r[0] = 0
lk.w = FTSealed
require.Equal(t, true, lk.canLock(FTUnsealed, FTNone))
require.Equal(t, true, lk.canLock(FTNone, FTUnsealed))
require.Equal(t, false, lk.canLock(FTSealed, FTNone))
require.Equal(t, false, lk.canLock(FTNone, FTSealed))
require.Equal(t, false, lk.canLock(ftAll, FTNone))
require.Equal(t, false, lk.canLock(FTNone, ftAll))
}
func TestIndexLocksSeq(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
ilk := &indexLocks{
locks: map[abi.SectorID]*sectorLock{},
}
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone))
cancel()
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
}
func TestIndexLocksBlockOn(t *testing.T) {
test := func(r1 SectorFileType, w1 SectorFileType, r2 SectorFileType, w2 SectorFileType) func(t *testing.T) {
return func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ilk := &indexLocks{
locks: map[abi.SectorID]*sectorLock{},
}
require.NoError(t, ilk.StorageLock(ctx, aSector, r1, w1))
sch := make(chan struct{})
go func() {
ctx, cancel := context.WithCancel(context.Background())
sch <- struct{}{}
require.NoError(t, ilk.StorageLock(ctx, aSector, r2, w2))
cancel()
sch <- struct{}{}
}()
<-sch
select {
case <-sch:
t.Fatal("that shouldn't happen")
case <-time.After(40 * time.Millisecond):
}
cancel()
select {
case <-sch:
case <-time.After(time.Second):
t.Fatal("timed out")
}
}
}
t.Run("readBlocksWrite", test(FTUnsealed, FTNone, FTNone, FTUnsealed))
t.Run("writeBlocksRead", test(FTNone, FTUnsealed, FTUnsealed, FTNone))
t.Run("writeBlocksWrite", test(FTNone, FTUnsealed, FTNone, FTUnsealed))
}
func TestIndexLocksBlockWonR(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ilk := &indexLocks{
locks: map[abi.SectorID]*sectorLock{},
}
require.NoError(t, ilk.StorageLock(ctx, aSector, FTUnsealed, FTNone))
sch := make(chan struct{})
go func() {
ctx, cancel := context.WithCancel(context.Background())
sch <- struct{}{}
require.NoError(t, ilk.StorageLock(ctx, aSector, FTNone, FTUnsealed))
cancel()
sch <- struct{}{}
}()
<-sch
select {
case <-sch:
t.Fatal("that shouldn't happen")
case <-time.After(40 * time.Millisecond):
}
cancel()
select {
case <-sch:
case <-time.After(time.Second):
t.Fatal("timed out")
}
}

View File

@ -0,0 +1,49 @@
package stores
import (
"context"
"sync"
)
// like sync.Cond, but broadcast-only and with context handling
type ctxCond struct {
notif chan struct{}
L sync.Locker
lk sync.Mutex
}
func newCtxCond(l sync.Locker) *ctxCond {
return &ctxCond{
L: l,
}
}
func (c *ctxCond) Broadcast() {
c.lk.Lock()
if c.notif != nil {
close(c.notif)
c.notif = nil
}
c.lk.Unlock()
}
func (c *ctxCond) Wait(ctx context.Context) error {
c.lk.Lock()
if c.notif == nil {
c.notif = make(chan struct{})
}
wait := c.notif
c.lk.Unlock()
c.L.Unlock()
defer c.L.Lock()
select {
case <-wait:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
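A sketch of the intended usage pattern, mirroring sectorLock above (the predicate is illustrative):
func waitUntil(ctx context.Context, c *ctxCond, pred func() bool) error {
	c.L.Lock()
	defer c.L.Unlock()
	for !pred() { // re-check after every Broadcast, as with sync.Cond
		if err := c.Wait(ctx); err != nil {
			return err // context cancelled while waiting
		}
	}
	return nil
}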

View File

@ -0,0 +1,35 @@
package stores
import (
"context"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type PathType string
const (
PathStorage PathType = "storage"
PathSealing PathType = "sealing"
)
type AcquireMode string
const (
AcquireMove AcquireMode = "move"
AcquireCopy AcquireMode = "copy"
)
type Store interface {
AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, sealing PathType, op AcquireMode) (paths SectorPaths, stores SectorPaths, err error)
Remove(ctx context.Context, s abi.SectorID, types SectorFileType, force bool) error
// like remove, but doesn't remove the primary sector copy, nor the last
// non-primary copy if there are no primary copies
RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error
// move sectors into storage
MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error
FsStat(ctx context.Context, id ID) (fsutil.FsStat, error)
}
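For example, a caller that needs the sealed file and cache of an existing sector, allocating nothing new, might use (a sketch; names are illustrative):
func openSealed(ctx context.Context, store Store, sid abi.SectorID, spt abi.RegisteredSealProof) (string, error) {
	// existing=FTSealed|FTCache, allocate=FTNone: find, don't create
	paths, _, err := store.AcquireSector(ctx, sid, spt, FTSealed|FTCache, FTNone, PathStorage, AcquireCopy)
	if err != nil {
		return "", err
	}
	return PathByType(paths, FTSealed), nil
}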

591
extern/sector-storage/stores/local.go vendored Normal file
View File

@ -0,0 +1,591 @@
package stores
import (
"context"
"encoding/json"
"io/ioutil"
"math/bits"
"math/rand"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type StoragePath struct {
ID ID
Weight uint64
LocalPath string
CanSeal bool
CanStore bool
}
// [path]/sectorstore.json
type LocalStorageMeta struct {
ID ID
Weight uint64 // 0 = readonly
CanSeal bool
CanStore bool
}
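// An example sectorstore.json (values illustrative):
//
//	{
//	  "ID": "1c0d19bb-0000-4000-8000-000000000000",
//	  "Weight": 10,
//	  "CanSeal": false,
//	  "CanStore": true
//	}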
// .lotusstorage/storage.json
type StorageConfig struct {
StoragePaths []LocalPath
}
type LocalPath struct {
Path string
}
type LocalStorage interface {
GetStorage() (StorageConfig, error)
SetStorage(func(*StorageConfig)) error
Stat(path string) (fsutil.FsStat, error)
// returns real disk usage for a file/directory
// returns os.ErrNotExist when the file doesn't exist
DiskUsage(path string) (int64, error)
}
const MetaFile = "sectorstore.json"
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}
type Local struct {
localStorage LocalStorage
index SectorIndex
urls []string
paths map[ID]*path
localLk sync.RWMutex
}
type path struct {
local string // absolute local path
reserved int64
reservations map[abi.SectorID]SectorFileType
}
func (p *path) stat(ls LocalStorage) (fsutil.FsStat, error) {
stat, err := ls.Stat(p.local)
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("stat %s: %w", p.local, err)
}
stat.Reserved = p.reserved
for id, ft := range p.reservations {
for _, fileType := range PathTypes {
if fileType&ft == 0 {
continue
}
sp := p.sectorPath(id, fileType)
used, err := ls.DiskUsage(sp)
if err == os.ErrNotExist {
p, ferr := tempFetchDest(sp, false)
if ferr != nil {
return fsutil.FsStat{}, ferr
}
used, err = ls.DiskUsage(p)
}
if err != nil {
log.Errorf("getting disk usage of '%s': %+v", p.sectorPath(id, fileType), err)
continue
}
stat.Reserved -= used
}
}
if stat.Reserved < 0 {
log.Warnf("negative reserved storage: p.reserved=%d, reserved: %d", p.reserved, stat.Reserved)
stat.Reserved = 0
}
stat.Available -= stat.Reserved
if stat.Available < 0 {
stat.Available = 0
}
return stat, nil
}
func (p *path) sectorPath(sid abi.SectorID, fileType SectorFileType) string {
return filepath.Join(p.local, fileType.String(), SectorName(sid))
}
func NewLocal(ctx context.Context, ls LocalStorage, index SectorIndex, urls []string) (*Local, error) {
l := &Local{
localStorage: ls,
index: index,
urls: urls,
paths: map[ID]*path{},
}
return l, l.open(ctx)
}
func (st *Local) OpenPath(ctx context.Context, p string) error {
st.localLk.Lock()
defer st.localLk.Unlock()
mb, err := ioutil.ReadFile(filepath.Join(p, MetaFile))
if err != nil {
return xerrors.Errorf("reading storage metadata for %s: %w", p, err)
}
var meta LocalStorageMeta
if err := json.Unmarshal(mb, &meta); err != nil {
return xerrors.Errorf("unmarshalling storage metadata for %s: %w", p, err)
}
// TODO: Check existing / dedupe
out := &path{
local: p,
reserved: 0,
reservations: map[abi.SectorID]SectorFileType{},
}
fst, err := out.stat(st.localStorage)
if err != nil {
return err
}
err = st.index.StorageAttach(ctx, StorageInfo{
ID: meta.ID,
URLs: st.urls,
Weight: meta.Weight,
CanSeal: meta.CanSeal,
CanStore: meta.CanStore,
}, fst)
if err != nil {
return xerrors.Errorf("declaring storage in index: %w", err)
}
for _, t := range PathTypes {
ents, err := ioutil.ReadDir(filepath.Join(p, t.String()))
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Join(p, t.String()), 0755); err != nil {
return xerrors.Errorf("openPath mkdir '%s': %w", filepath.Join(p, t.String()), err)
}
continue
}
return xerrors.Errorf("listing %s: %w", filepath.Join(p, t.String()), err)
}
for _, ent := range ents {
if ent.Name() == FetchTempSubdir {
continue
}
sid, err := ParseSectorID(ent.Name())
if err != nil {
return xerrors.Errorf("parse sector id %s: %w", ent.Name(), err)
}
if err := st.index.StorageDeclareSector(ctx, meta.ID, sid, t, meta.CanStore); err != nil {
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", sid, t, meta.ID, err)
}
}
}
st.paths[meta.ID] = out
return nil
}
func (st *Local) open(ctx context.Context) error {
cfg, err := st.localStorage.GetStorage()
if err != nil {
return xerrors.Errorf("getting local storage config: %w", err)
}
for _, path := range cfg.StoragePaths {
err := st.OpenPath(ctx, path.Path)
if err != nil {
return xerrors.Errorf("opening path %s: %w", path.Path, err)
}
}
go st.reportHealth(ctx)
return nil
}
func (st *Local) reportHealth(ctx context.Context) {
// randomize interval by ~10%
interval := (HeartbeatInterval * time.Duration(100_000+rand.Int63n(10_000))) / 100_000
for {
select {
case <-time.After(interval):
case <-ctx.Done():
return
}
st.localLk.RLock()
toReport := map[ID]HealthReport{}
for id, p := range st.paths {
stat, err := p.stat(st.localStorage)
toReport[id] = HealthReport{
Stat: stat,
Err: err,
}
}
st.localLk.RUnlock()
for id, report := range toReport {
if err := st.index.StorageReportHealth(ctx, id, report); err != nil {
log.Warnf("error reporting storage health for %s: %+v", id, report)
}
}
}
}
func (st *Local) Reserve(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, ft SectorFileType, storageIDs SectorPaths, overheadTab map[SectorFileType]int) (func(), error) {
ssize, err := spt.SectorSize()
if err != nil {
return nil, xerrors.Errorf("getting sector size: %w", err)
}
st.localLk.Lock()
done := func() {}
deferredDone := func() { done() }
defer func() {
st.localLk.Unlock()
deferredDone()
}()
for _, fileType := range PathTypes {
if fileType&ft == 0 {
continue
}
id := ID(PathByType(storageIDs, fileType))
p, ok := st.paths[id]
if !ok {
return nil, errPathNotFound
}
stat, err := p.stat(st.localStorage)
if err != nil {
return nil, xerrors.Errorf("getting local storage stat: %w", err)
}
overhead := int64(overheadTab[fileType]) * int64(ssize) / FSOverheadDen
if stat.Available < overhead {
return nil, xerrors.Errorf("can't reserve %d bytes in '%s' (id:%s), only %d available", overhead, p.local, id, stat.Available)
}
p.reserved += overhead
prevDone := done
done = func() {
prevDone()
st.localLk.Lock()
defer st.localLk.Unlock()
p.reserved -= overhead
}
}
deferredDone = func() {}
return done, nil
}
func (st *Local) AcquireSector(ctx context.Context, sid abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) {
if existing|allocate != existing^allocate {
return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector")
}
st.localLk.RLock()
defer st.localLk.RUnlock()
var out SectorPaths
var storageIDs SectorPaths
for _, fileType := range PathTypes {
if fileType&existing == 0 {
continue
}
si, err := st.index.StorageFindSector(ctx, sid, fileType, spt, false)
if err != nil {
log.Warnf("finding existing sector %d(t:%d) failed: %+v", sid, fileType, err)
continue
}
for _, info := range si {
p, ok := st.paths[info.ID]
if !ok {
continue
}
if p.local == "" { // TODO: can that even be the case?
continue
}
spath := p.sectorPath(sid, fileType)
SetPathByType(&out, fileType, spath)
SetPathByType(&storageIDs, fileType, string(info.ID))
existing ^= fileType
break
}
}
for _, fileType := range PathTypes {
if fileType&allocate == 0 {
continue
}
sis, err := st.index.StorageBestAlloc(ctx, fileType, spt, pathType)
if err != nil {
return SectorPaths{}, SectorPaths{}, xerrors.Errorf("finding best storage for allocating : %w", err)
}
var best string
var bestID ID
for _, si := range sis {
p, ok := st.paths[si.ID]
if !ok {
continue
}
if p.local == "" { // TODO: can that even be the case?
continue
}
if (pathType == PathSealing) && !si.CanSeal {
continue
}
if (pathType == PathStorage) && !si.CanStore {
continue
}
// TODO: Check free space
best = p.sectorPath(sid, fileType)
bestID = si.ID
break
}
if best == "" {
return SectorPaths{}, SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector")
}
SetPathByType(&out, fileType, best)
SetPathByType(&storageIDs, fileType, string(bestID))
allocate ^= fileType
}
return out, storageIDs, nil
}
func (st *Local) Local(ctx context.Context) ([]StoragePath, error) {
st.localLk.RLock()
defer st.localLk.RUnlock()
var out []StoragePath
for id, p := range st.paths {
if p.local == "" {
continue
}
si, err := st.index.StorageInfo(ctx, id)
if err != nil {
return nil, xerrors.Errorf("get storage info for %s: %w", id, err)
}
out = append(out, StoragePath{
ID: id,
Weight: si.Weight,
LocalPath: p.local,
CanSeal: si.CanSeal,
CanStore: si.CanStore,
})
}
return out, nil
}
func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error {
if bits.OnesCount(uint(typ)) != 1 {
return xerrors.New("delete expects one file type")
}
si, err := st.index.StorageFindSector(ctx, sid, typ, 0, false)
if err != nil {
return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
}
if len(si) == 0 && !force {
return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ)
}
for _, info := range si {
if err := st.removeSector(ctx, sid, typ, info.ID); err != nil {
return err
}
}
return nil
}
func (st *Local) RemoveCopies(ctx context.Context, sid abi.SectorID, typ SectorFileType) error {
if bits.OnesCount(uint(typ)) != 1 {
return xerrors.New("delete expects one file type")
}
si, err := st.index.StorageFindSector(ctx, sid, typ, 0, false)
if err != nil {
return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
}
var hasPrimary bool
for _, info := range si {
if info.Primary {
hasPrimary = true
break
}
}
if !hasPrimary {
log.Warnf("RemoveCopies: no primary copies of sector %v (%s), not removing anything", sid, typ)
return nil
}
for _, info := range si {
if info.Primary {
continue
}
if err := st.removeSector(ctx, sid, typ, info.ID); err != nil {
return err
}
}
return nil
}
func (st *Local) removeSector(ctx context.Context, sid abi.SectorID, typ SectorFileType, storage ID) error {
p, ok := st.paths[storage]
if !ok {
return nil
}
if p.local == "" { // TODO: can that even be the case?
return nil
}
if err := st.index.StorageDropSector(ctx, storage, sid, typ); err != nil {
return xerrors.Errorf("dropping sector from index: %w", err)
}
spath := p.sectorPath(sid, typ)
log.Infof("remove %s", spath)
if err := os.RemoveAll(spath); err != nil {
log.Errorf("removing sector (%v) from %s: %+v", sid, spath, err)
}
return nil
}
func (st *Local) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error {
dest, destIds, err := st.AcquireSector(ctx, s, spt, FTNone, types, PathStorage, AcquireMove)
if err != nil {
return xerrors.Errorf("acquire dest storage: %w", err)
}
src, srcIds, err := st.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove)
if err != nil {
return xerrors.Errorf("acquire src storage: %w", err)
}
for _, fileType := range PathTypes {
if fileType&types == 0 {
continue
}
sst, err := st.index.StorageInfo(ctx, ID(PathByType(srcIds, fileType)))
if err != nil {
return xerrors.Errorf("failed to get source storage info: %w", err)
}
dst, err := st.index.StorageInfo(ctx, ID(PathByType(destIds, fileType)))
if err != nil {
return xerrors.Errorf("failed to get source storage info: %w", err)
}
if sst.ID == dst.ID {
log.Debugf("not moving %v(%d); src and dest are the same", s, fileType)
continue
}
if sst.CanStore {
log.Debugf("not moving %v(%d); source supports storage", s, fileType)
continue
}
log.Debugf("moving %v(%d) to storage: %s(se:%t; st:%t) -> %s(se:%t; st:%t)", s, fileType, sst.ID, sst.CanSeal, sst.CanStore, dst.ID, dst.CanSeal, dst.CanStore)
if err := st.index.StorageDropSector(ctx, ID(PathByType(srcIds, fileType)), s, fileType); err != nil {
return xerrors.Errorf("dropping source sector from index: %w", err)
}
if err := move(PathByType(src, fileType), PathByType(dest, fileType)); err != nil {
// TODO: attempt some recovery (check if src is still there, re-declare)
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
}
if err := st.index.StorageDeclareSector(ctx, ID(PathByType(destIds, fileType)), s, fileType, true); err != nil {
return xerrors.Errorf("declare sector %d(t:%d) -> %s: %w", s, fileType, ID(PathByType(destIds, fileType)), err)
}
}
return nil
}
var errPathNotFound = xerrors.Errorf("fsstat: path not found")
func (st *Local) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
st.localLk.RLock()
defer st.localLk.RUnlock()
p, ok := st.paths[id]
if !ok {
return fsutil.FsStat{}, errPathNotFound
}
return p.stat(st.localStorage)
}
var _ Store = &Local{}

View File

@ -0,0 +1,94 @@
package stores
import (
"context"
"encoding/json"
"github.com/filecoin-project/sector-storage/fsutil"
"github.com/google/uuid"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
)
const pathSize = 16 << 20
type TestingLocalStorage struct {
root string
c StorageConfig
}
func (t *TestingLocalStorage) DiskUsage(path string) (int64, error) {
return 1, nil
}
func (t *TestingLocalStorage) GetStorage() (StorageConfig, error) {
return t.c, nil
}
func (t *TestingLocalStorage) SetStorage(f func(*StorageConfig)) error {
f(&t.c)
return nil
}
func (t *TestingLocalStorage) Stat(path string) (fsutil.FsStat, error) {
return fsutil.FsStat{
Capacity: pathSize,
Available: pathSize,
}, nil
}
func (t *TestingLocalStorage) init(subpath string) error {
path := filepath.Join(t.root, subpath)
if err := os.Mkdir(path, 0755); err != nil {
return err
}
metaFile := filepath.Join(path, MetaFile)
meta := &LocalStorageMeta{
ID: ID(uuid.New().String()),
Weight: 1,
CanSeal: true,
CanStore: true,
}
mb, err := json.MarshalIndent(meta, "", " ")
if err != nil {
return err
}
if err := ioutil.WriteFile(metaFile, mb, 0644); err != nil {
return err
}
return nil
}
var _ LocalStorage = &TestingLocalStorage{}
func TestLocalStorage(t *testing.T) {
ctx := context.TODO()
root, err := ioutil.TempDir("", "sector-storage-teststorage-")
require.NoError(t, err)
tstor := &TestingLocalStorage{
root: root,
}
index := NewIndex()
st, err := NewLocal(ctx, tstor, index, nil)
require.NoError(t, err)
p1 := "1"
require.NoError(t, tstor.init("1"))
err = st.OpenPath(ctx, filepath.Join(tstor.root, p1))
require.NoError(t, err)
// TODO: put more things here
}

406
extern/sector-storage/stores/remote.go vendored Normal file
View File

@ -0,0 +1,406 @@
package stores
import (
"context"
"encoding/json"
"github.com/filecoin-project/sector-storage/fsutil"
"io/ioutil"
"math/bits"
"mime"
"net/http"
"net/url"
"os"
gopath "path"
"path/filepath"
"sort"
"sync"
"github.com/hashicorp/go-multierror"
files "github.com/ipfs/go-ipfs-files"
"golang.org/x/xerrors"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/sector-storage/tarutil"
)
var FetchTempSubdir = "fetching"
type Remote struct {
local *Local
index SectorIndex
auth http.Header
limit chan struct{}
fetchLk sync.Mutex
fetching map[abi.SectorID]chan struct{}
}
func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types SectorFileType) error {
// TODO: do this on remotes too
// (not that we really need to do that since it's always called by the
// worker which pulled the copy)
return r.local.RemoveCopies(ctx, s, types)
}
func NewRemote(local *Local, index SectorIndex, auth http.Header, fetchLimit int) *Remote {
return &Remote{
local: local,
index: index,
auth: auth,
limit: make(chan struct{}, fetchLimit),
fetching: map[abi.SectorID]chan struct{}{},
}
}
func (r *Remote) AcquireSector(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, existing SectorFileType, allocate SectorFileType, pathType PathType, op AcquireMode) (SectorPaths, SectorPaths, error) {
if existing|allocate != existing^allocate {
return SectorPaths{}, SectorPaths{}, xerrors.New("can't both find and allocate a sector")
}
for {
r.fetchLk.Lock()
c, locked := r.fetching[s]
if !locked {
r.fetching[s] = make(chan struct{})
r.fetchLk.Unlock()
break
}
r.fetchLk.Unlock()
select {
case <-c:
continue
case <-ctx.Done():
return SectorPaths{}, SectorPaths{}, ctx.Err()
}
}
defer func() {
r.fetchLk.Lock()
close(r.fetching[s])
delete(r.fetching, s)
r.fetchLk.Unlock()
}()
paths, stores, err := r.local.AcquireSector(ctx, s, spt, existing, allocate, pathType, op)
if err != nil {
return SectorPaths{}, SectorPaths{}, xerrors.Errorf("local acquire error: %w", err)
}
var toFetch SectorFileType
for _, fileType := range PathTypes {
if fileType&existing == 0 {
continue
}
if PathByType(paths, fileType) == "" {
toFetch |= fileType
}
}
apaths, ids, err := r.local.AcquireSector(ctx, s, spt, FTNone, toFetch, pathType, op)
if err != nil {
return SectorPaths{}, SectorPaths{}, xerrors.Errorf("allocate local sector for fetching: %w", err)
}
odt := FSOverheadSeal
if pathType == PathStorage {
odt = FsOverheadFinalized
}
releaseStorage, err := r.local.Reserve(ctx, s, spt, toFetch, ids, odt)
if err != nil {
return SectorPaths{}, SectorPaths{}, xerrors.Errorf("reserving storage space: %w", err)
}
defer releaseStorage()
for _, fileType := range PathTypes {
if fileType&existing == 0 {
continue
}
if PathByType(paths, fileType) != "" {
continue
}
dest := PathByType(apaths, fileType)
storageID := PathByType(ids, fileType)
url, err := r.acquireFromRemote(ctx, s, fileType, dest)
if err != nil {
return SectorPaths{}, SectorPaths{}, err
}
SetPathByType(&paths, fileType, dest)
SetPathByType(&stores, fileType, storageID)
if err := r.index.StorageDeclareSector(ctx, ID(storageID), s, fileType, op == AcquireMove); err != nil {
log.Warnf("declaring sector %v in %s failed: %+v", s, storageID, err)
continue
}
if op == AcquireMove {
if err := r.deleteFromRemote(ctx, url); err != nil {
log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err)
}
}
}
return paths, stores, nil
}
func tempFetchDest(spath string, create bool) (string, error) {
st, b := filepath.Split(spath)
tempdir := filepath.Join(st, FetchTempSubdir)
if create {
if err := os.MkdirAll(tempdir, 0755); err != nil {
return "", xerrors.Errorf("creating temp fetch dir: %w", err)
}
}
return filepath.Join(tempdir, b), nil
}
func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType SectorFileType, dest string) (string, error) {
si, err := r.index.StorageFindSector(ctx, s, fileType, 0, false)
if err != nil {
return "", err
}
if len(si) == 0 {
return "", xerrors.Errorf("failed to acquire sector %v from remote(%d): %w", s, fileType, storiface.ErrSectorNotFound)
}
sort.Slice(si, func(i, j int) bool {
return si[i].Weight < si[j].Weight
})
var merr error
for _, info := range si {
// TODO: see what we have local, prefer that
for _, url := range info.URLs {
tempDest, err := tempFetchDest(dest, true)
if err != nil {
return "", err
}
if err := os.RemoveAll(dest); err != nil {
return "", xerrors.Errorf("removing dest: %w", err)
}
err = r.fetch(ctx, url, tempDest)
if err != nil {
merr = multierror.Append(merr, xerrors.Errorf("fetch error %s (storage %s) -> %s: %w", url, info.ID, tempDest, err))
continue
}
if err := move(tempDest, dest); err != nil {
return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err)
}
if merr != nil {
log.Warnw("acquireFromRemote encountered errors when fetching sector from remote", "errors", merr)
}
return url, nil
}
}
return "", xerrors.Errorf("failed to acquire sector %v from remote (tried %v): %w", s, si, merr)
}
func (r *Remote) fetch(ctx context.Context, url, outname string) error {
log.Infof("Fetch %s -> %s", url, outname)
if len(r.limit) >= cap(r.limit) {
log.Infof("Throttling fetch, %d already running", len(r.limit))
}
// TODO: Smarter throttling
// * Priority (just going sequentially is still pretty good)
// * Per interface
// * Aware of remote load
select {
case r.limit <- struct{}{}:
defer func() { <-r.limit }()
case <-ctx.Done():
return xerrors.Errorf("context error while waiting for fetch limiter: %w", ctx.Err())
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return xerrors.Errorf("request: %w", err)
}
req.Header = r.auth
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return xerrors.Errorf("do request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
}
/*bar := pb.New64(w.sizeForType(typ))
bar.ShowPercent = true
bar.ShowSpeed = true
bar.Units = pb.U_BYTES
barreader := bar.NewProxyReader(resp.Body)
bar.Start()
defer bar.Finish()*/
mediatype, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err != nil {
return xerrors.Errorf("parse media type: %w", err)
}
if err := os.RemoveAll(outname); err != nil {
return xerrors.Errorf("removing dest: %w", err)
}
switch mediatype {
case "application/x-tar":
return tarutil.ExtractTar(resp.Body, outname)
case "application/octet-stream":
return files.WriteTo(files.NewReaderFile(resp.Body), outname)
default:
return xerrors.Errorf("unknown content type: '%s'", mediatype)
}
}
func (r *Remote) MoveStorage(ctx context.Context, s abi.SectorID, spt abi.RegisteredSealProof, types SectorFileType) error {
// Make sure we have the data local
_, _, err := r.AcquireSector(ctx, s, spt, types, FTNone, PathStorage, AcquireMove)
if err != nil {
return xerrors.Errorf("acquire src storage (remote): %w", err)
}
return r.local.MoveStorage(ctx, s, spt, types)
}
func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ SectorFileType, force bool) error {
if bits.OnesCount(uint(typ)) != 1 {
return xerrors.New("delete expects one file type")
}
if err := r.local.Remove(ctx, sid, typ, force); err != nil {
return xerrors.Errorf("remove from local: %w", err)
}
si, err := r.index.StorageFindSector(ctx, sid, typ, 0, false)
if err != nil {
return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err)
}
for _, info := range si {
for _, url := range info.URLs {
if err := r.deleteFromRemote(ctx, url); err != nil {
log.Warnf("remove %s: %+v", url, err)
continue
}
break
}
}
return nil
}
func (r *Remote) deleteFromRemote(ctx context.Context, url string) error {
log.Infof("Delete %s", url)
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return xerrors.Errorf("request: %w", err)
}
req.Header = r.auth
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return xerrors.Errorf("do request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return xerrors.Errorf("non-200 code: %d", resp.StatusCode)
}
return nil
}
func (r *Remote) FsStat(ctx context.Context, id ID) (fsutil.FsStat, error) {
st, err := r.local.FsStat(ctx, id)
switch err {
case nil:
return st, nil
case errPathNotFound:
break
default:
return fsutil.FsStat{}, xerrors.Errorf("local stat: %w", err)
}
si, err := r.index.StorageInfo(ctx, id)
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("getting remote storage info: %w", err)
}
if len(si.URLs) == 0 {
return fsutil.FsStat{}, xerrors.Errorf("no known URLs for remote storage %s", id)
}
rl, err := url.Parse(si.URLs[0])
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("failed to parse url: %w", err)
}
rl.Path = gopath.Join(rl.Path, "stat", string(id))
req, err := http.NewRequest("GET", rl.String(), nil)
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("request: %w", err)
}
req.Header = r.auth
req = req.WithContext(ctx)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("do request: %w", err)
}
defer resp.Body.Close()
switch resp.StatusCode {
case 200:
break
case 404:
return fsutil.FsStat{}, errPathNotFound
case 500:
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500, then failed to read the error: %w", err)
}
return fsutil.FsStat{}, xerrors.Errorf("fsstat: got http 500: %s", string(b))
default:
return fsutil.FsStat{}, xerrors.Errorf("fsstat: unexpected http status %d", resp.StatusCode)
}
var out fsutil.FsStat
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
return fsutil.FsStat{}, xerrors.Errorf("decoding fsstat: %w", err)
}
return out, nil
}
var _ Store = &Remote{}

View File

@ -0,0 +1,43 @@
package stores
import (
"bytes"
"os/exec"
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
"golang.org/x/xerrors"
)
func move(from, to string) error {
from, err := homedir.Expand(from)
if err != nil {
return xerrors.Errorf("move: expanding from: %w", err)
}
to, err = homedir.Expand(to)
if err != nil {
return xerrors.Errorf("move: expanding to: %w", err)
}
if filepath.Base(from) != filepath.Base(to) {
return xerrors.Errorf("move: base names must match ('%s' != '%s')", filepath.Base(from), filepath.Base(to))
}
log.Debugw("move sector data", "from", from, "to", to)
toDir := filepath.Dir(to)
// `mv` has decades of experience in moving files quickly; don't pretend we
// can do better
var errOut bytes.Buffer
cmd := exec.Command("/usr/bin/env", "mv", "-t", toDir, from)
cmd.Stderr = &errOut
if err := cmd.Run(); err != nil {
return xerrors.Errorf("exec mv (stderr: %s): %w", strings.TrimSpace(errOut.String()), err)
}
return nil
}

17
extern/sector-storage/storiface/ffi.go vendored Normal file
View File

@ -0,0 +1,17 @@
package storiface
import (
"errors"
"github.com/filecoin-project/specs-actors/actors/abi"
)
var ErrSectorNotFound = errors.New("sector not found")
type UnpaddedByteIndex uint64
func (i UnpaddedByteIndex) Padded() PaddedByteIndex {
return PaddedByteIndex(abi.UnpaddedPieceSize(i).Padded())
}
type PaddedByteIndex uint64
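Worked examples of the fr32 padding arithmetic above (every 127 unpadded bytes expand to 128 padded bytes):
// UnpaddedByteIndex(127).Padded() == PaddedByteIndex(128)
// UnpaddedByteIndex(254).Padded() == PaddedByteIndex(256)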

View File

@ -0,0 +1,41 @@
package storiface
import (
"time"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/specs-actors/actors/abi"
)
type WorkerInfo struct {
Hostname string
Resources WorkerResources
}
type WorkerResources struct {
MemPhysical uint64
MemSwap uint64
MemReserved uint64 // Used by system / other processes
CPUs uint64 // Logical cores
GPUs []string
}
type WorkerStats struct {
Info WorkerInfo
MemUsedMin uint64
MemUsedMax uint64
GpuUsed bool
CpuUse uint64
}
type WorkerJob struct {
ID uint64
Sector abi.SectorID
Task sealtasks.TaskType
Start time.Time
}

92
extern/sector-storage/tarutil/systar.go vendored Normal file
View File

@ -0,0 +1,92 @@
package tarutil
import (
"archive/tar"
"golang.org/x/xerrors"
"io"
"io/ioutil"
"os"
"path/filepath"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("tarutil") // nolint
func ExtractTar(body io.Reader, dir string) error {
if err := os.MkdirAll(dir, 0755); err != nil {
return xerrors.Errorf("mkdir: %w", err)
}
tr := tar.NewReader(body)
for {
header, err := tr.Next()
switch err {
default:
return err
case io.EOF:
return nil
case nil:
}
f, err := os.Create(filepath.Join(dir, header.Name))
if err != nil {
return xerrors.Errorf("creating file %s: %w", filepath.Join(dir, header.Name), err)
}
if _, err := io.Copy(f, tr); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
}
}
func TarDirectory(dir string) (io.ReadCloser, error) {
r, w := io.Pipe()
go func() {
_ = w.CloseWithError(writeTarDirectory(dir, w))
}()
return r, nil
}
func writeTarDirectory(dir string, w io.Writer) error {
tw := tar.NewWriter(w)
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, file := range files {
h, err := tar.FileInfoHeader(file, "")
if err != nil {
return xerrors.Errorf("getting header for file %s: %w", file.Name(), err)
}
if err := tw.WriteHeader(h); err != nil {
return xerrors.Errorf("wiritng header for file %s: %w", file.Name(), err)
}
f, err := os.OpenFile(filepath.Join(dir, file.Name()), os.O_RDONLY, 0644) // nolint
if err != nil {
return xerrors.Errorf("opening %s for reading: %w", file.Name(), err)
}
if _, err := io.Copy(tw, f); err != nil {
return xerrors.Errorf("copy data for file %s: %w", file.Name(), err)
}
if err := f.Close(); err != nil {
return err
}
}
return nil
}
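A round-trip sketch tying the two helpers together (paths are illustrative; note writeTarDirectory only handles flat directories):
func copyDirAsTar(src, dst string) error {
	rc, err := TarDirectory(src)
	if err != nil {
		return err
	}
	defer rc.Close() // nolint
	return ExtractTar(rc, dst)
}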

127
extern/sector-storage/testworker_test.go vendored Normal file
View File

@ -0,0 +1,127 @@
package sectorstorage
import (
"context"
"io"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/mock"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
type testWorker struct {
acceptTasks map[sealtasks.TaskType]struct{}
lstor *stores.Local
mockSeal *mock.SectorMgr
}
func newTestWorker(wcfg WorkerConfig, lstor *stores.Local) *testWorker {
ssize, err := wcfg.SealProof.SectorSize()
if err != nil {
panic(err)
}
acceptTasks := map[sealtasks.TaskType]struct{}{}
for _, taskType := range wcfg.TaskTypes {
acceptTasks[taskType] = struct{}{}
}
return &testWorker{
acceptTasks: acceptTasks,
lstor: lstor,
mockSeal: mock.NewMockSectorMgr(ssize, nil),
}
}
func (t *testWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
return t.mockSeal.SealPreCommit1(ctx, sector, ticket, pieces)
}
func (t *testWorker) NewSector(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (t *testWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
panic("implement me")
}
func (t *testWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
panic("implement me")
}
func (t *testWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
return t.mockSeal.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
}
func (t *testWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
panic("implement me")
}
func (t *testWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
panic("implement me")
}
func (t *testWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
panic("implement me")
}
func (t *testWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
panic("implement me")
}
func (t *testWorker) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error {
panic("implement me")
}
func (t *testWorker) Remove(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (t *testWorker) MoveStorage(ctx context.Context, sector abi.SectorID) error {
panic("implement me")
}
func (t *testWorker) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
return nil
}
func (t *testWorker) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
return t.acceptTasks, nil
}
func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return t.lstor.Local(ctx)
}
func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) {
res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1]
return storiface.WorkerInfo{
Hostname: "testworkerer",
Resources: storiface.WorkerResources{
MemPhysical: res.MinMemory * 3,
MemSwap: 0,
MemReserved: res.MinMemory,
CPUs: 32,
GPUs: nil,
},
}, nil
}
func (t *testWorker) Closing(ctx context.Context) (<-chan struct{}, error) {
return ctx.Done(), nil
}
func (t *testWorker) Close() error {
panic("implement me")
}
var _ Worker = &testWorker{}

129
extern/sector-storage/work_tracker.go vendored Normal file
View File

@ -0,0 +1,129 @@
package sectorstorage
import (
"context"
"io"
"sync"
"time"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
)
type workTracker struct {
lk sync.Mutex
ctr uint64
running map[uint64]storiface.WorkerJob
// TODO: done, aggregate stats, queue stats, scheduler feedback
}
func (wt *workTracker) track(sid abi.SectorID, task sealtasks.TaskType) func() {
wt.lk.Lock()
defer wt.lk.Unlock()
id := wt.ctr
wt.ctr++
wt.running[id] = storiface.WorkerJob{
ID: id,
Sector: sid,
Task: task,
Start: time.Now(),
}
return func() {
wt.lk.Lock()
defer wt.lk.Unlock()
delete(wt.running, id)
}
}
func (wt *workTracker) worker(w Worker) Worker {
return &trackedWorker{
Worker: w,
tracker: wt,
}
}
func (wt *workTracker) Running() []storiface.WorkerJob {
wt.lk.Lock()
defer wt.lk.Unlock()
out := make([]storiface.WorkerJob, 0, len(wt.running))
for _, job := range wt.running {
out = append(out, job)
}
return out
}
type trackedWorker struct {
Worker
tracker *workTracker
}
func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
defer t.tracker.track(sector, sealtasks.TTPreCommit1)()
return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)
}
func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector abi.SectorID, pc1o storage.PreCommit1Out) (storage.SectorCids, error) {
defer t.tracker.track(sector, sealtasks.TTPreCommit2)()
return t.Worker.SealPreCommit2(ctx, sector, pc1o)
}
func (t *trackedWorker) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
defer t.tracker.track(sector, sealtasks.TTCommit1)()
return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}
func (t *trackedWorker) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
defer t.tracker.track(sector, sealtasks.TTCommit2)()
return t.Worker.SealCommit2(ctx, sector, c1o)
}
func (t *trackedWorker) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error {
defer t.tracker.track(sector, sealtasks.TTFinalize)()
return t.Worker.FinalizeSector(ctx, sector, keepUnsealed)
}
func (t *trackedWorker) AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
defer t.tracker.track(sector, sealtasks.TTAddPiece)()
return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)
}
func (t *trackedWorker) Fetch(ctx context.Context, s abi.SectorID, ft stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error {
defer t.tracker.track(s, sealtasks.TTFetch)()
return t.Worker.Fetch(ctx, s, ft, ptype, am)
}
func (t *trackedWorker) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) error {
defer t.tracker.track(id, sealtasks.TTUnseal)()
return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid)
}
func (t *trackedWorker) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) {
defer t.tracker.track(id, sealtasks.TTReadUnsealed)()
return t.Worker.ReadPiece(ctx, writer, id, index, size)
}
var _ Worker = &trackedWorker{}

View File

@ -0,0 +1,56 @@
package zerocomm
import (
"math/bits"
commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/ipfs/go-cid"
)
const Levels = 37
const Skip = 2 // can't generate for 32, 64b
var PieceComms = [Levels - Skip][32]byte{
{0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, 0x83, 0x33},
{0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, 0x75, 0xf},
{0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, 0x85, 0x26},
{0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, 0x37, 0x2f},
{0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, 0xf8, 0x33},
{0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, 0xf6, 0x24},
{0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, 0xa3, 0x2f},
{0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, 0x51, 0x8},
{0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, 0x40, 0x11},
{0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, 0x4f, 0x2},
{0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, 0xd, 0x3f},
{0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, 0x2a, 0x7},
{0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, 0x2d, 0x19},
{0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, 0x16, 0x2b},
{0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, 0x13, 0x26},
{0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 0x4f, 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, 0xe8, 0x11},
{0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, 0x1c},
{0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, 0xe1, 0x3a},
{0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, 0x75, 0x8},
{0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, 0xe7, 0x6},
{0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, 0x1a, 0x2f},
{0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, 0x1c, 0x37},
{0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, 0xda, 0x6},
{0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, 0x5b, 0x3d},
{0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, 0xc6, 0x2c},
{0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, 0x51, 0x7},
{0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, 0x9b, 0x1},
{0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, 0x85, 0x3c},
{0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, 0x3e},
{0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, 0xce, 0x2},
{0x25, 0x9d, 0x3d, 0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, 0x5, 0x1d},
{0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, 0x92, 0x39},
{0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, 0x81, 0x13},
{0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, 0xe8, 0x19},
{0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, 0xff, 0x32},
}
func ZeroPieceCommitment(sz abi.UnpaddedPieceSize) cid.Cid {
level := bits.TrailingZeros64(uint64(sz.Padded())) - Skip - 5 // 2^5 = 32
commP, _ := commcid.PieceCommitmentV1ToCID(PieceComms[level][:])
return commP
}
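
The `level` index above maps a padded piece size to a row of `PieceComms`: a padded size of 2^n bytes has n trailing zero bits, and since the smallest stored commitment is for 128-byte (2^7) pieces, subtracting `Skip + 5` puts 128B at index 0. A standalone check of that arithmetic (a sketch, not part of this diff):

package main

import (
	"fmt"
	"math/bits"
)

const skip = 2 // as in zerocomm: no commitments for the 32B and 64B levels

func main() {
	// padded piece sizes are powers of two; e.g. 127 unpadded bytes pad to 128
	for _, padded := range []uint64{128, 256, 2048} {
		level := bits.TrailingZeros64(padded) - skip - 5 // 2^5 = 32
		fmt.Printf("padded %4d bytes -> PieceComms[%d]\n", padded, level)
	}
	// prints indices 0, 1 and 4 respectively
}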


@ -0,0 +1,115 @@
package zerocomm_test
import (
"bytes"
"fmt"
"io"
"testing"
commcid "github.com/filecoin-project/go-fil-commcid"
abi "github.com/filecoin-project/specs-actors/actors/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/zerocomm"
)
func TestComms(t *testing.T) {
t.Skip("don't have enough ram") // no, but seriously, currently this needs like 3tb of /tmp
var expPieceComms [zerocomm.Levels - zerocomm.Skip]cid.Cid
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < zerocomm.Levels-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestCommsSmall(t *testing.T) {
var expPieceComms [8]cid.Cid
lvls := len(expPieceComms) + zerocomm.Skip
{
l2, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 127)), 127)
if err != nil {
t.Fatal(err)
}
expPieceComms[0] = l2
}
for i := 1; i < lvls-2; i++ {
var err error
sz := abi.UnpaddedPieceSize(127 << uint(i))
fmt.Println(i, sz)
r := io.LimitReader(&NullReader{}, int64(sz))
expPieceComms[i], err = ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, r, sz)
if err != nil {
t.Fatal(err)
}
}
for i, comm := range expPieceComms {
c, err := commcid.CIDToPieceCommitmentV1(comm)
if err != nil {
t.Fatal(err)
}
if string(c) != string(zerocomm.PieceComms[i][:]) {
t.Errorf("zero commitment %d didn't match", i)
}
}
for _, comm := range expPieceComms { // Could do codegen, but this is good enough
fmt.Printf("%#v,\n", comm)
}
}
func TestForSize(t *testing.T) {
exp, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, bytes.NewReader(make([]byte, 1016)), 1016)
if err != nil {
t.Fatal(err) // a bare return here would silently pass the test on error
}
actual := zerocomm.ZeroPieceCommitment(1016)
if !exp.Equals(actual) {
t.Errorf("zero commitment didn't match")
}
}
type NullReader struct{}
func (NullReader) Read(out []byte) (int, error) {
for i := range out {
out[i] = 0
}
return len(out), nil
}

79 extern/storage-fsm/.circleci/config.yml vendored Normal file

@ -0,0 +1,79 @@
version: 2.1
orbs:
go: gotest/tools@0.0.9
executors:
golang:
docker:
- image: circleci/golang:1.13
resource_class: 2xlarge
commands:
prepare-git-checkout:
steps:
- checkout
- run: git submodule sync
- run: git submodule update --init --recursive
install-build-dependencies:
steps:
- run: sudo apt-get update
- run: sudo apt-get install -y jq ocl-icd-opencl-dev
- run: ./extern/filecoin-ffi/install-filcrypto
download-groth-params-and-verifying-keys:
steps:
- restore_cache:
name: Restore parameters cache
keys:
- 'v24-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: |
DIR=$(pwd)
cd $(mktemp -d)
go get github.com/filecoin-project/go-paramfetch/paramfetch
go build -o go-paramfetch github.com/filecoin-project/go-paramfetch/paramfetch
./go-paramfetch 2048 "${DIR}/parameters.json"
- save_cache:
name: Save parameters cache
key: 'v24-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
jobs:
test:
executor: golang
environment:
RUST_LOG: info
steps:
- prepare-git-checkout
- install-build-dependencies
- download-groth-params-and-verifying-keys
- run: go test -v -timeout 10m ./...
mod-tidy-check:
executor: golang
steps:
- prepare-git-checkout
- go/mod-download
- go/mod-tidy-check
gofmt-check:
executor: golang
steps:
- prepare-git-checkout
- go/mod-download
- run: "! go fmt ./... 2>&1 | read"
lint-check:
executor: golang
steps:
- prepare-git-checkout
- install-build-dependencies
- go/mod-download
- go/install-golangci-lint:
gobin: $HOME/.local/bin
version: 1.23.8
- run:
command: $HOME/.local/bin/golangci-lint run -v --concurrency 2
workflows:
version: 2.1
build_and_test:
jobs:
- mod-tidy-check
- lint-check
- gofmt-check
- test

24 extern/storage-fsm/.gitignore vendored Normal file

@ -0,0 +1,24 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# filecoin-ffi assets
*.a
*.h
*.pc
# build artifacts
build/.filecoin-ffi-install
build/.update-submodules

4 extern/storage-fsm/.gitmodules vendored Normal file

@ -0,0 +1,4 @@
[submodule "extern/filecoin-ffi"]
path = extern/filecoin-ffi
url = git@github.com:filecoin-project/filecoin-ffi
branch = master

5 extern/storage-fsm/LICENSE-APACHE vendored Normal file

@ -0,0 +1,5 @@
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

19 extern/storage-fsm/LICENSE-MIT vendored Normal file

@ -0,0 +1,19 @@
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

59 extern/storage-fsm/Makefile vendored Normal file

@ -0,0 +1,59 @@
SHELL=/usr/bin/env bash
all: build
.PHONY: all
# git submodules that need to be loaded
SUBMODULES:=
# things to clean up, e.g. libfilecoin.a
CLEAN:=
FFI_PATH:=extern/filecoin-ffi/
FFI_DEPS:=libfilcrypto.a filcrypto.pc filcrypto.h
FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS))
$(FFI_DEPS): build/.filecoin-ffi-install ;
# dummy file that marks the last time the filecoin-ffi project was built
build/.filecoin-ffi-install: $(FFI_PATH)
$(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%)
@touch $@
SUBMODULES+=$(FFI_PATH)
BUILD_DEPS+=build/.filecoin-ffi-install
CLEAN+=build/.filecoin-ffi-install
$(SUBMODULES): build/.update-submodules ;
# dummy file that marks the last time submodules were updated
build/.update-submodules:
git submodule update --init --recursive
touch $@
CLEAN+=build/.update-submodules
# build and install any upstream dependencies, e.g. filecoin-ffi
deps: $(BUILD_DEPS)
.PHONY: deps
test: $(BUILD_DEPS)
RUST_LOG=info go test -race -count 1 -v -timeout 120m ./...
.PHONY: test
lint: $(BUILD_DEPS)
golangci-lint run -v --concurrency 2 --new-from-rev origin/master
.PHONY: lint
build: $(BUILD_DEPS)
go build -v $(GOFLAGS) ./...
.PHONY: build
clean:
rm -rf $(CLEAN)
-$(MAKE) -C $(FFI_PATH) clean
.PHONY: clean
type-gen:
go run ./gen/main.go
.PHONY: type-gen

18 extern/storage-fsm/README.md vendored Normal file

@ -0,0 +1,18 @@
# storage-fsm
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![CircleCI](https://circleci.com/gh/filecoin-project/storage-fsm.svg?style=svg)](https://circleci.com/gh/filecoin-project/storage-fsm)
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
> A finite state machine used for sector storage
## Disclaimer
Please report issues regarding storage-fsm at the [lotus issue tracker](https://github.com/filecoin-project/lotus/issues)
## License
The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms:
- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/storage-fsm/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/storage-fsm/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT)

0 extern/storage-fsm/build/.keep vendored Normal file

1563 extern/storage-fsm/cbor_gen.go vendored Normal file

File diff suppressed because it is too large

165 extern/storage-fsm/checks.go vendored Normal file

@ -0,0 +1,165 @@
package sealing
import (
"bytes"
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/sector-storage/ffiwrapper"
"github.com/filecoin-project/sector-storage/zerocomm"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/crypto"
)
// TODO: For now we handle API errors by halting state execution; once jsonrpc
// reconnection support lands, we should implement some wait-for-api logic
type ErrApi struct{ error }
type ErrInvalidDeals struct{ error }
type ErrInvalidPiece struct{ error }
type ErrExpiredDeals struct{ error }
type ErrBadCommD struct{ error }
type ErrExpiredTicket struct{ error }
type ErrBadTicket struct{ error }
type ErrPrecommitOnChain struct{ error }
type ErrBadSeed struct{ error }
type ErrInvalidProof struct{ error }
type ErrNoPrecommit struct{ error }
func checkPieces(ctx context.Context, si SectorInfo, api SealingAPI) error {
tok, height, err := api.ChainHead(ctx)
if err != nil {
return &ErrApi{xerrors.Errorf("getting chain head: %w", err)}
}
for i, p := range si.Pieces {
// if no deal is associated with the piece, ensure that we added it as
// filler (i.e. ensure that it has a zero PieceCID)
if p.DealInfo == nil {
exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded())
if !p.Piece.PieceCID.Equals(exp) {
return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", si.SectorNumber, i, p.Piece.PieceCID)}
}
continue
}
proposal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, tok)
if err != nil {
return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)}
}
if proposal.PieceCID != p.Piece.PieceCID {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %x != %x", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID)}
}
if p.Piece.Size != proposal.PieceSize {
return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize)}
}
if height >= proposal.StartEpoch {
return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(si.Pieces), si.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height)}
}
}
return nil
}
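
`checkPieces` above treats any piece without deal info as filler, which must be all zeros; its CID therefore has to equal the precomputed zero commitment for its size. The same check in isolation (a runnable sketch; `isValidFiller` is a hypothetical helper, not part of this diff):

package main

import (
	"fmt"

	"github.com/filecoin-project/sector-storage/zerocomm"
	"github.com/filecoin-project/specs-actors/actors/abi"
)

// isValidFiller mirrors the no-deal branch of checkPieces: a filler piece must
// carry the precomputed zero commitment for its size.
func isValidFiller(p abi.PieceInfo) bool {
	return p.PieceCID.Equals(zerocomm.ZeroPieceCommitment(p.Size.Unpadded()))
}

func main() {
	filler := abi.PieceInfo{
		Size:     abi.PaddedPieceSize(2048), // 2048 padded == 2032 unpadded
		PieceCID: zerocomm.ZeroPieceCommitment(abi.UnpaddedPieceSize(2032)),
	}
	fmt.Println(isValidFiller(filler)) // true
}
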
// checkPrecommit checks that data commitment generated in the sealing process
// matches pieces, and that the seal ticket isn't expired
func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, height abi.ChainEpoch, api SealingAPI) (err error) {
commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok)
if err != nil {
return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)}
}
if !commD.Equals(*si.CommD) {
return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)}
}
if height-(si.TicketEpoch+SealRandomnessLookback) > SealRandomnessLookbackLimit(si.SectorType) {
return &ErrExpiredTicket{xerrors.Errorf("ticket expired: seal height: %d, head: %d", si.TicketEpoch+SealRandomnessLookback, height)}
}
pci, err := api.StateSectorPreCommitInfo(ctx, maddr, si.SectorNumber, tok)
if err != nil {
return &ErrApi{xerrors.Errorf("getting precommit info: %w", err)}
}
if pci != nil {
if pci.Info.SealRandEpoch != si.TicketEpoch {
return &ErrBadTicket{xerrors.Errorf("bad ticket epoch: %d != %d", pci.Info.SealRandEpoch, si.TicketEpoch)}
}
return &ErrPrecommitOnChain{xerrors.Errorf("precommit already on chain")}
}
return nil
}
func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, tok TipSetToken) (err error) {
if si.SeedEpoch == 0 {
return &ErrBadSeed{xerrors.Errorf("seed epoch was not set")}
}
pci, err := m.api.StateSectorPreCommitInfo(ctx, m.maddr, si.SectorNumber, tok)
if err != nil {
return xerrors.Errorf("getting precommit info: %w", err)
}
if pci == nil {
return &ErrNoPrecommit{xerrors.Errorf("precommit info not found on-chain")}
}
if pci.PreCommitEpoch+miner.PreCommitChallengeDelay != si.SeedEpoch {
return &ErrBadSeed{xerrors.Errorf("seed epoch doesn't match on chain info: %d != %d", pci.PreCommitEpoch+miner.PreCommitChallengeDelay, si.SeedEpoch)}
}
buf := new(bytes.Buffer)
if err := m.maddr.MarshalCBOR(buf); err != nil {
return err
}
seed, err := m.api.ChainGetRandomness(ctx, tok, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, si.SeedEpoch, buf.Bytes())
if err != nil {
return &ErrApi{xerrors.Errorf("failed to get randomness for computing seal proof: %w", err)}
}
if string(seed) != string(si.SeedValue) {
return &ErrBadSeed{xerrors.Errorf("seed has changed")}
}
ss, err := m.api.StateMinerSectorSize(ctx, m.maddr, tok)
if err != nil {
return &ErrApi{err}
}
spt, err := ffiwrapper.SealProofTypeFromSectorSize(ss)
if err != nil {
return err
}
if *si.CommR != pci.Info.SealedCID {
log.Warn("on-chain sealed CID doesn't match!")
}
ok, err := m.verif.VerifySeal(abi.SealVerifyInfo{
SectorID: m.minerSector(si.SectorNumber),
SealedCID: pci.Info.SealedCID,
SealProof: spt,
Proof: proof,
Randomness: si.TicketValue,
InteractiveRandomness: si.SeedValue,
UnsealedCID: *si.CommD,
})
if err != nil {
return xerrors.Errorf("verify seal: %w", err)
}
if !ok {
return &ErrInvalidProof{xerrors.New("invalid proof (compute error?)")}
}
return nil
}

17 extern/storage-fsm/constants.go vendored Normal file

@ -0,0 +1,17 @@
package sealing
import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
)
// Epochs
const SealRandomnessLookback = miner.ChainFinality
// Epochs
func SealRandomnessLookbackLimit(spt abi.RegisteredSealProof) abi.ChainEpoch {
return miner.MaxSealDuration[spt]
}
// Epochs
const InteractivePoRepConfidence = 6

15 extern/storage-fsm/events.go vendored Normal file

@ -0,0 +1,15 @@
package sealing
import (
"context"
"github.com/filecoin-project/specs-actors/actors/abi"
)
// `curH`-`ts.Height` = `confidence`
type HeightHandler func(ctx context.Context, tok TipSetToken, curH abi.ChainEpoch) error
type RevertHandler func(ctx context.Context, tok TipSetToken) error
type Events interface {
ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error
}
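
`ChainAt` is expected to invoke `hnd` once the chain is `confidence` epochs past height `h`, and `rev` if the relevant tipset is reverted. A toy in-memory implementation of the interface, useful only as a test stub (a sketch with local stand-in types, assuming nothing beyond the signatures above):

package main

import (
	"context"
	"fmt"
)

// Local stand-ins for the types used by the Events interface above.
type TipSetToken []byte
type ChainEpoch int64

type HeightHandler func(ctx context.Context, tok TipSetToken, curH ChainEpoch) error
type RevertHandler func(ctx context.Context, tok TipSetToken) error

// fakeEvents fires the height handler immediately, pretending the chain has
// already advanced `confidence` epochs past h; useful only in unit tests.
type fakeEvents struct{}

func (f *fakeEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h ChainEpoch) error {
	return hnd(context.TODO(), TipSetToken{}, h+ChainEpoch(confidence))
}

func main() {
	ev := &fakeEvents{}
	_ = ev.ChainAt(func(ctx context.Context, tok TipSetToken, curH ChainEpoch) error {
		fmt.Println("handler called at height", curH)
		return nil
	}, nil, 5, 100)
}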

394 extern/storage-fsm/fsm.go vendored Normal file

@ -0,0 +1,394 @@
package sealing
import (
"bytes"
"context"
"encoding/json"
"fmt"
"reflect"
"time"
"golang.org/x/xerrors"
statemachine "github.com/filecoin-project/go-statemachine"
"github.com/filecoin-project/specs-actors/actors/abi"
)
func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface{}, uint64, error) {
next, err := m.plan(events, user.(*SectorInfo))
if err != nil || next == nil {
return nil, uint64(len(events)), err
}
return func(ctx statemachine.Context, si SectorInfo) error {
err := next(ctx, si)
if err != nil {
log.Errorf("unhandled sector error (%d): %+v", si.SectorNumber, err)
return nil
}
return nil
}, uint64(len(events)), nil // TODO: this processed-event count is not accurate
}
var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *SectorInfo) error{
// Sealing
UndefinedSectorState: planOne(
on(SectorStart{}, Empty),
on(SectorStartCC{}, Packing),
),
Empty: planOne(on(SectorAddPiece{}, WaitDeals)),
WaitDeals: planOne(
on(SectorAddPiece{}, WaitDeals),
on(SectorStartPacking{}, Packing),
),
Packing: planOne(on(SectorPacked{}, PreCommit1)),
PreCommit1: planOne(
on(SectorPreCommit1{}, PreCommit2),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorPackingFailed{}, PackingFailed),
),
PreCommit2: planOne(
on(SectorPreCommit2{}, PreCommitting),
on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed),
on(SectorPackingFailed{}, PackingFailed),
),
PreCommitting: planOne(
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorPreCommitted{}, PreCommitWait),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed),
),
PreCommitWait: planOne(
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorPreCommitLanded{}, WaitSeed),
),
WaitSeed: planOne(
on(SectorSeedReady{}, Committing),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
),
Committing: planCommitting,
CommitWait: planOne(
on(SectorProving{}, FinalizeSector),
on(SectorCommitFailed{}, CommitFailed),
),
FinalizeSector: planOne(
on(SectorFinalized{}, Proving),
on(SectorFinalizeFailed{}, FinalizeFailed),
),
// Sealing errors
SealPreCommit1Failed: planOne(
on(SectorRetrySealPreCommit1{}, PreCommit1),
),
SealPreCommit2Failed: planOne(
on(SectorRetrySealPreCommit1{}, PreCommit1),
on(SectorRetrySealPreCommit2{}, PreCommit2),
),
PreCommitFailed: planOne(
on(SectorRetryPreCommit{}, PreCommitting),
on(SectorRetryWaitSeed{}, WaitSeed),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorPreCommitLanded{}, WaitSeed),
),
ComputeProofFailed: planOne(
on(SectorRetryComputeProof{}, Committing),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
),
CommitFailed: planOne(
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
on(SectorRetryWaitSeed{}, WaitSeed),
on(SectorRetryComputeProof{}, Committing),
on(SectorRetryInvalidProof{}, Committing),
on(SectorRetryPreCommitWait{}, PreCommitWait),
on(SectorChainPreCommitFailed{}, PreCommitFailed),
on(SectorRetryPreCommit{}, PreCommitting),
),
FinalizeFailed: planOne(
on(SectorRetryFinalize{}, FinalizeSector),
),
// Post-seal
Proving: planOne(
on(SectorFaultReported{}, FaultReported),
on(SectorFaulty{}, Faulty),
on(SectorRemove{}, Removing),
),
Removing: planOne(
on(SectorRemoved{}, Removed),
on(SectorRemoveFailed{}, RemoveFailed),
),
Faulty: planOne(
on(SectorFaultReported{}, FaultReported),
),
FaultedFinal: final,
Removed: final,
}
func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(statemachine.Context, SectorInfo) error, error) {
/////
// First process all events
for _, event := range events {
e, err := json.Marshal(event)
if err != nil {
log.Errorf("marshaling event for logging: %+v", err)
continue
}
l := Log{
Timestamp: uint64(time.Now().Unix()),
Message: string(e),
Kind: fmt.Sprintf("event;%T", event.User),
}
if err, iserr := event.User.(xerrors.Formatter); iserr {
l.Trace = fmt.Sprintf("%+v", err)
}
if len(state.Log) > 8000 {
log.Warnw("truncating sector log", "sector", state.SectorNumber)
state.Log[2000] = Log{
Timestamp: uint64(time.Now().Unix()),
Message: "truncating log (above 8000 entries)",
Kind: "truncate",
}
// keep the first 2000 entries plus the truncation marker, then resume at entry
// 6000; appending state.Log[:6000] here would duplicate entries instead of dropping them
state.Log = append(state.Log[:2001], state.Log[6000:]...)
}
state.Log = append(state.Log, l)
}
p := fsmPlanners[state.State]
if p == nil {
return nil, xerrors.Errorf("planner for state %s not found", state.State)
}
if err := p(events, state); err != nil {
return nil, xerrors.Errorf("running planner for state %s failed: %w", state.State, err)
}
/////
// Now decide what to do next
/*
* Empty <- incoming deals
| |
| v
*<- WaitDeals <- incoming deals
| |
| v
*<- Packing <- incoming committed capacity
| |
| v
*<- PreCommit1 <--> SealPreCommit1Failed
| | ^ ^^
| | *----------++----\
| v v || |
*<- PreCommit2 --------++--> SealPreCommit2Failed
| | ||
| v /-------/|
* PreCommitting <-----+---> PreCommitFailed
| | | ^
| v | |
*<- WaitSeed -----------+-----/
| ||| ^ |
| ||| \--------*-----/
| ||| |
| vvv v----+----> ComputeProofFailed
*<- Committing |
| | ^--> CommitFailed
| v ^
*<- CommitWait ---/
| |
| v
| FinalizeSector <--> FinalizeFailed
| |
| v
*<- Proving
|
v
FailedUnrecoverable
UndefinedSectorState <- ¯\_(ツ)_/¯
| ^
*---------------------/
*/
switch state.State {
// Happy path
case Empty:
fallthrough
case WaitDeals:
log.Infof("Waiting for deals %d", state.SectorNumber)
case Packing:
return m.handlePacking, nil
case PreCommit1:
return m.handlePreCommit1, nil
case PreCommit2:
return m.handlePreCommit2, nil
case PreCommitting:
return m.handlePreCommitting, nil
case PreCommitWait:
return m.handlePreCommitWait, nil
case WaitSeed:
return m.handleWaitSeed, nil
case Committing:
return m.handleCommitting, nil
case CommitWait:
return m.handleCommitWait, nil
case FinalizeSector:
return m.handleFinalizeSector, nil
// Handled failure modes
case SealPreCommit1Failed:
return m.handleSealPrecommit1Failed, nil
case SealPreCommit2Failed:
return m.handleSealPrecommit2Failed, nil
case PreCommitFailed:
return m.handlePreCommitFailed, nil
case ComputeProofFailed:
return m.handleComputeProofFailed, nil
case CommitFailed:
return m.handleCommitFailed, nil
case FinalizeFailed:
return m.handleFinalizeFailed, nil
// Post-seal
case Proving:
return m.handleProvingSector, nil
case Removing:
return m.handleRemoving, nil
case Removed:
return nil, nil
// Faults
case Faulty:
return m.handleFaulty, nil
case FaultReported:
return m.handleFaultReported, nil
// Fatal errors
case UndefinedSectorState:
log.Error("sector update with undefined state!")
case FailedUnrecoverable:
log.Errorf("sector %d failed unrecoverably", state.SectorNumber)
default:
log.Errorf("unexpected sector update state: %s", state.State)
}
return nil, nil
}
func planCommitting(events []statemachine.Event, state *SectorInfo) error {
for _, event := range events {
switch e := event.User.(type) {
case globalMutator:
if e.applyGlobal(state) {
return nil
}
case SectorCommitted: // the normal case
e.apply(state)
state.State = CommitWait
case SectorSeedReady: // seed changed :/
if e.SeedEpoch == state.SeedEpoch && bytes.Equal(e.SeedValue, state.SeedValue) {
log.Warnf("planCommitting: got SectorSeedReady, but the seed didn't change")
continue // or it didn't!
}
log.Warnf("planCommitting: commit Seed changed")
e.apply(state)
state.State = Committing
return nil
case SectorComputeProofFailed:
state.State = ComputeProofFailed
case SectorSealPreCommit1Failed:
state.State = SealPreCommit1Failed
case SectorCommitFailed:
state.State = CommitFailed
default:
return xerrors.Errorf("planCommitting got event of unknown type %T, events: %+v", event.User, events)
}
}
return nil
}
func (m *Sealing) restartSectors(ctx context.Context) error {
trackedSectors, err := m.ListSectors()
if err != nil {
log.Errorf("loading sector list: %+v", err)
}
for _, sector := range trackedSectors {
if err := m.sectors.Send(uint64(sector.SectorNumber), SectorRestart{}); err != nil {
log.Errorf("restarting sector %d: %+v", sector.SectorNumber, err)
}
}
// TODO: Grab on-chain sector set and diff with trackedSectors
return nil
}
func (m *Sealing) ForceSectorState(ctx context.Context, id abi.SectorNumber, state SectorState) error {
return m.sectors.Send(id, SectorForceState{state})
}
func final(events []statemachine.Event, state *SectorInfo) error {
return xerrors.Errorf("didn't expect any events in state %s, got %+v", state.State, events)
}
func on(mut mutator, next SectorState) func() (mutator, SectorState) {
return func() (mutator, SectorState) {
return mut, next
}
}
func planOne(ts ...func() (mut mutator, next SectorState)) func(events []statemachine.Event, state *SectorInfo) error {
return func(events []statemachine.Event, state *SectorInfo) error {
if len(events) != 1 {
for _, event := range events {
if gm, ok := event.User.(globalMutator); ok {
gm.applyGlobal(state)
return nil
}
}
return xerrors.Errorf("planner for state %s only has a plan for a single event only, got %+v", state.State, events)
}
if gm, ok := events[0].User.(globalMutator); ok {
gm.applyGlobal(state)
return nil
}
for _, t := range ts {
mut, next := t()
if reflect.TypeOf(events[0].User) != reflect.TypeOf(mut) {
continue
}
if err, iserr := events[0].User.(error); iserr {
log.Warnf("sector %d got error event %T: %+v", state.SectorNumber, events[0].User, err)
}
events[0].User.(mutator).apply(state)
state.State = next
return nil
}
if _, ok := events[0].User.(Ignorable); ok {
return nil
}
return xerrors.Errorf("planner for state %s received unexpected event %T (%+v)", state.State, events[0].User, events[0])
}
}
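
The `on`/`planOne` helpers above build a one-event dispatch table: `on(Event{}, Next)` pairs an event's concrete type with a successor state, and `planOne` matches the incoming event by `reflect.TypeOf` before applying its mutation and switching state. A stripped-down, standalone sketch of the same dispatch idea (hypothetical types, not the actual sealing states):

package main

import (
	"fmt"
	"reflect"
)

type state string

type mutator interface{ apply(s *state) }

type eventPacked struct{}
type eventFailed struct{}

func (eventPacked) apply(s *state) {}
func (eventFailed) apply(s *state) {}

// on pairs an event type with the state to transition to, as in fsm.go above.
func on(mut mutator, next state) func() (mutator, state) {
	return func() (mutator, state) { return mut, next }
}

// planOne applies the first transition whose event type matches ev.
func planOne(ts ...func() (mutator, state)) func(ev mutator, s *state) error {
	return func(ev mutator, s *state) error {
		for _, t := range ts {
			mut, next := t()
			if reflect.TypeOf(ev) != reflect.TypeOf(mut) {
				continue
			}
			ev.apply(s)
			*s = next
			return nil
		}
		return fmt.Errorf("state %s: unexpected event %T", *s, ev)
	}
}

func main() {
	s := state("Packing")
	plan := planOne(
		on(eventPacked{}, "PreCommit1"),
		on(eventFailed{}, "PackingFailed"),
	)
	if err := plan(eventPacked{}, &s); err != nil {
		panic(err)
	}
	fmt.Println("next state:", s) // next state: PreCommit1
}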

Some files were not shown because too many files have changed in this diff.