2022-06-14 18:03:38 +00:00
|
|
|
package sealer
|
2021-11-26 16:40:43 +00:00
|
|
|
|
|
|
|
import (
|
2021-12-03 19:11:52 +00:00
|
|
|
"bufio"
|
2021-11-26 16:40:43 +00:00
|
|
|
"context"
|
|
|
|
"io"
|
2022-03-29 14:11:06 +00:00
|
|
|
"sync"
|
2021-11-26 16:40:43 +00:00
|
|
|
|
2023-05-23 09:59:40 +00:00
|
|
|
lru "github.com/hashicorp/golang-lru/v2"
|
2021-11-26 16:40:43 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2021-12-03 19:11:52 +00:00
|
|
|
"go.opencensus.io/stats"
|
2023-05-23 11:32:17 +00:00
|
|
|
"go.opencensus.io/tag"
|
2021-11-26 16:40:43 +00:00
|
|
|
"golang.org/x/xerrors"
|
|
|
|
|
|
|
|
"github.com/filecoin-project/dagstore/mount"
|
|
|
|
"github.com/filecoin-project/go-state-types/abi"
|
2022-06-14 15:00:51 +00:00
|
|
|
|
2021-12-03 19:11:52 +00:00
|
|
|
"github.com/filecoin-project/lotus/metrics"
|
2021-11-26 16:40:43 +00:00
|
|
|
)
|
|
|
|
|
2021-11-26 17:49:41 +00:00
|
|
|
// For small read skips, it's faster to "burn" some bytes than to setup new sector reader.
// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB.
var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M

// ReadBuf is the size of the bufio buffer wrapped around the sequential backing
// reader; 128 * (127*8) bytes is the unpadded counterpart of a 128 KiB padded
// span (127/128 ratio).
var ReadBuf = 128 * (127 * 8) // unpadded(128k)

// MinRandomReadSize is the minimum number of bytes fetched from the backing
// reader for a random-access ReadAt; smaller requests are rounded up to this
// size and the unused tail is cached in remReads for subsequent reads.
var MinRandomReadSize = int64(4 << 10)

// pieceGetter opens a new reader over the piece data starting at the given
// byte offset, reading at most size bytes.
type pieceGetter func(offset, size uint64) (io.ReadCloser, error)
|
2021-12-08 22:16:27 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
// pieceReader provides both sequential (Read/Seek) and random-access (ReadAt)
// reads over a single unsealed piece, lazily (re)opening backing readers via
// getReader and caching leftover bytes from rounded-up random reads.
type pieceReader struct {
	getReader pieceGetter           // opens a backing reader at (offset, size)
	pieceCid  cid.Cid               // piece CID, used for log messages
	len       abi.UnpaddedPieceSize // total unpadded length of the piece
	onClose   context.CancelFunc    // invoked once in Close

	// metrics contexts tagged with the read type ("seq" / "rand"), set in init
	seqMCtx context.Context
	atMCtx  context.Context

	closed bool  // set by Close; check() rejects further use
	seqAt  int64 // next byte to be read by io.Reader

	// sequential reader; seqMu guards all fields below it as well as closed/seqAt
	seqMu sync.Mutex
	r     io.ReadCloser
	br    *bufio.Reader // buffered wrapper around r (ReadBuf bytes)
	rAt   int64         // current position of the backing reader r

	// random read cache
	remReads *lru.Cache[int64, []byte] // data start offset -> data
	// todo try carrying a "bytes read sequentially so far" counter with those
	// cached byte buffers, increase buffer sizes when we see that we're doing
	// a long sequential read
}
|
|
|
|
|
2023-05-25 13:55:12 +00:00
|
|
|
func (p *pieceReader) init(ctx context.Context) (_ *pieceReader, err error) {
|
|
|
|
stats.Record(ctx, metrics.DagStorePRInitCount.M(1))
|
2021-12-03 16:07:14 +00:00
|
|
|
|
2023-05-25 13:55:12 +00:00
|
|
|
p.seqMCtx, _ = tag.New(ctx, tag.Upsert(metrics.PRReadType, "seq"))
|
|
|
|
p.atMCtx, _ = tag.New(ctx, tag.Upsert(metrics.PRReadType, "rand"))
|
2023-05-23 11:32:17 +00:00
|
|
|
|
2023-05-19 16:30:16 +00:00
|
|
|
p.remReads, err = lru.New[int64, []byte](100)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
p.rAt = 0
|
2023-05-25 13:55:12 +00:00
|
|
|
p.r, err = p.getReader(uint64(p.rAt), uint64(p.len))
|
2021-11-26 16:40:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-12-08 22:16:27 +00:00
|
|
|
if p.r == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2021-12-03 19:11:52 +00:00
|
|
|
p.br = bufio.NewReaderSize(p.r, ReadBuf)
|
2021-11-26 16:40:43 +00:00
|
|
|
|
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *pieceReader) check() error {
|
|
|
|
if p.closed {
|
|
|
|
return xerrors.Errorf("reader closed")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *pieceReader) Close() error {
|
2023-05-19 16:30:16 +00:00
|
|
|
p.seqMu.Lock()
|
|
|
|
defer p.seqMu.Unlock()
|
2022-03-29 14:11:06 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
if err := p.check(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if p.r != nil {
|
|
|
|
if err := p.r.Close(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
p.r = nil
|
|
|
|
}
|
|
|
|
|
2021-12-08 22:16:27 +00:00
|
|
|
p.onClose()
|
|
|
|
|
|
|
|
p.closed = true
|
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *pieceReader) Read(b []byte) (int, error) {
|
2023-05-19 16:30:16 +00:00
|
|
|
p.seqMu.Lock()
|
|
|
|
defer p.seqMu.Unlock()
|
2022-03-29 14:11:06 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
if err := p.check(); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2023-05-19 16:30:16 +00:00
|
|
|
n, err := p.readSeqReader(b)
|
2021-11-26 16:40:43 +00:00
|
|
|
p.seqAt += int64(n)
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *pieceReader) Seek(offset int64, whence int) (int64, error) {
|
2023-05-19 16:30:16 +00:00
|
|
|
p.seqMu.Lock()
|
|
|
|
defer p.seqMu.Unlock()
|
2022-03-29 14:11:06 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
if err := p.check(); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch whence {
|
|
|
|
case io.SeekStart:
|
|
|
|
p.seqAt = offset
|
|
|
|
case io.SeekCurrent:
|
|
|
|
p.seqAt += offset
|
|
|
|
case io.SeekEnd:
|
|
|
|
p.seqAt = int64(p.len) + offset
|
|
|
|
default:
|
|
|
|
return 0, xerrors.Errorf("bad whence")
|
|
|
|
}
|
|
|
|
|
|
|
|
return p.seqAt, nil
|
|
|
|
}
|
|
|
|
|
2023-05-19 16:30:16 +00:00
|
|
|
func (p *pieceReader) readSeqReader(b []byte) (n int, err error) {
|
|
|
|
off := p.seqAt
|
2022-03-29 14:11:06 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
if err := p.check(); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2023-05-23 11:32:17 +00:00
|
|
|
stats.Record(p.seqMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))
|
2021-12-03 16:07:14 +00:00
|
|
|
|
2021-11-29 14:32:27 +00:00
|
|
|
// 1. Get the backing reader into the correct position
|
2021-11-26 16:40:43 +00:00
|
|
|
|
|
|
|
// if the backing reader is ahead of the offset we want, or more than
|
|
|
|
// MaxPieceReaderBurnBytes behind, reset the reader
|
2021-11-29 14:32:27 +00:00
|
|
|
if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off {
|
2021-11-26 16:40:43 +00:00
|
|
|
if p.r != nil {
|
|
|
|
if err := p.r.Close(); err != nil {
|
|
|
|
return 0, xerrors.Errorf("closing backing reader: %w", err)
|
|
|
|
}
|
|
|
|
p.r = nil
|
2021-12-03 19:11:52 +00:00
|
|
|
p.br = nil
|
2021-11-26 16:40:43 +00:00
|
|
|
}
|
|
|
|
|
2021-12-03 19:11:52 +00:00
|
|
|
log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b))
|
2021-11-26 19:21:09 +00:00
|
|
|
|
2021-12-03 16:07:14 +00:00
|
|
|
if off > p.rAt {
|
2023-05-23 11:32:17 +00:00
|
|
|
stats.Record(p.seqMCtx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1))
|
2021-12-03 16:07:14 +00:00
|
|
|
} else {
|
2023-05-23 11:32:17 +00:00
|
|
|
stats.Record(p.seqMCtx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1))
|
2021-12-03 16:07:14 +00:00
|
|
|
}
|
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
p.rAt = off
|
2023-05-25 13:55:12 +00:00
|
|
|
p.r, err = p.getReader(uint64(p.rAt), uint64(p.len))
|
2021-12-03 19:11:52 +00:00
|
|
|
p.br = bufio.NewReaderSize(p.r, ReadBuf)
|
2021-11-26 16:40:43 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, xerrors.Errorf("getting backing reader: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-29 14:32:27 +00:00
|
|
|
// 2. Check if we need to burn some bytes
|
2021-11-26 16:40:43 +00:00
|
|
|
if off > p.rAt {
|
2023-05-23 11:32:17 +00:00
|
|
|
stats.Record(p.seqMCtx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1))
|
2021-12-03 16:07:14 +00:00
|
|
|
|
2021-12-03 19:11:52 +00:00
|
|
|
n, err := io.CopyN(io.Discard, p.br, off-p.rAt)
|
2021-11-26 17:01:09 +00:00
|
|
|
p.rAt += n
|
|
|
|
if err != nil {
|
2021-11-26 16:40:43 +00:00
|
|
|
return 0, xerrors.Errorf("discarding read gap: %w", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-29 14:32:27 +00:00
|
|
|
// 3. Sanity check
|
2021-11-26 17:01:09 +00:00
|
|
|
if off != p.rAt {
|
|
|
|
return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt)
|
|
|
|
}
|
|
|
|
|
2021-11-29 14:32:27 +00:00
|
|
|
// 4. Read!
|
2021-12-03 19:11:52 +00:00
|
|
|
n, err = io.ReadFull(p.br, b)
|
|
|
|
if n < len(b) {
|
|
|
|
log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err)
|
|
|
|
}
|
2021-12-03 22:36:36 +00:00
|
|
|
if err == io.ErrUnexpectedEOF {
|
|
|
|
err = io.EOF
|
|
|
|
}
|
2021-12-03 19:11:52 +00:00
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
p.rAt += int64(n)
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
2023-05-19 16:30:16 +00:00
|
|
|
// ReadAt implements io.ReaderAt. It first serves what it can from the
// remReads cache; any remainder is fetched with a fresh one-shot reader.
// Small remainders (< MinRandomReadSize) are rounded up to MinRandomReadSize
// and the unused tail is cached for subsequent sequential-ish random reads.
//
// NOTE(review): no mutex is taken here; p.remReads (hashicorp lru) is
// presumably internally synchronized, and p.getReader opens an independent
// reader per call — confirm this is safe for concurrent ReadAt callers.
func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) {
	stats.Record(p.atMCtx, metrics.DagStorePRBytesRequested.M(int64(len(b))))

	// number of bytes of b satisfied so far (cache + backing read)
	var filled int64

	// try to get a buf from lru
	data, ok := p.remReads.Get(off)
	if ok {
		n = copy(b, data)
		filled += int64(n)

		// if the cached buffer was only partially consumed, re-key the
		// remainder at its new start offset
		if n < len(data) {
			p.remReads.Add(off+int64(n), data[n:])

			// keep the header buffered
			if off != 0 {
				p.remReads.Remove(off)
			}
		}

		stats.Record(p.atMCtx, metrics.DagStorePRAtHitBytes.M(int64(n)), metrics.DagStorePRAtHitCount.M(1))
		// dagstore/pr_at_hit_bytes, dagstore/pr_at_hit_count
	}
	// fully satisfied from cache — done
	if filled == int64(len(b)) {
		// dagstore/pr_at_cache_fill_count
		stats.Record(p.atMCtx, metrics.DagStorePRAtCacheFillCount.M(1))
		return n, nil
	}

	// remaining span to fetch from the backing store
	readOff := off + filled
	readSize := int64(len(b)) - filled

	smallRead := readSize < MinRandomReadSize

	if smallRead {
		// read into small read buf
		readBuf := make([]byte, MinRandomReadSize)
		bn, err := p.readInto(readBuf, readOff)
		// EOF here just means the piece ended inside the rounded-up span;
		// whatever was read is still used below
		if err != nil && err != io.EOF {
			return int(filled), err
		}

		_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "small")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))

		// reslice so that the slice is the data
		readBuf = readBuf[:bn]

		// fill user data
		used := copy(b[filled:], readBuf[:])
		filled += int64(used)
		readBuf = readBuf[used:]

		// cache the rest
		if len(readBuf) > 0 {
			p.remReads.Add(readOff+int64(used), readBuf)
		}
	} else {
		// read into user buf
		bn, err := p.readInto(b[filled:], readOff)
		if err != nil {
			return int(filled), err
		}
		filled += int64(bn)

		_ = stats.RecordWithTags(p.atMCtx, []tag.Mutator{tag.Insert(metrics.PRReadSize, "big")}, metrics.DagStorePRAtReadBytes.M(int64(bn)), metrics.DagStorePRAtReadCount.M(1))
	}

	// short fill means we ran off the end of the piece
	if filled < int64(len(b)) {
		return int(filled), io.EOF
	}

	return int(filled), nil
}
|
|
|
|
|
|
|
|
func (p *pieceReader) readInto(b []byte, off int64) (n int, err error) {
|
2023-05-25 13:55:12 +00:00
|
|
|
rd, err := p.getReader(uint64(off), uint64(len(b)))
|
2023-05-19 16:30:16 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, xerrors.Errorf("getting reader: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
n, err = io.ReadFull(rd, b)
|
2023-05-19 16:47:51 +00:00
|
|
|
|
|
|
|
cerr := rd.Close()
|
|
|
|
|
2023-05-19 16:30:16 +00:00
|
|
|
if err == io.ErrUnexpectedEOF {
|
|
|
|
err = io.EOF
|
|
|
|
}
|
|
|
|
|
2023-05-19 16:47:51 +00:00
|
|
|
if err != nil {
|
|
|
|
return n, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return n, cerr
|
2023-05-19 16:30:16 +00:00
|
|
|
}
|
|
|
|
|
2021-11-26 16:40:43 +00:00
|
|
|
// Compile-time assertion that *pieceReader implements mount.Reader.
var _ mount.Reader = (*pieceReader)(nil)