lotus/node/repo/fsrepo_ds.go

package repo

import (
	"os"
	"path/filepath"

	dgbadger "github.com/dgraph-io/badger/v2"
	ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-datastore"
	badger "github.com/ipfs/go-ds-badger2"
	levelds "github.com/ipfs/go-ds-leveldb"
	measure "github.com/ipfs/go-ds-measure"
)

type dsCtor func(path string, readonly bool) (datastore.Batching, error)
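
// ChainBadgerOptions returns the Badger configuration used for the chain
// datastore: periodic GC is disabled, the value log may be truncated on
// recovery, and the value threshold is lowered to 128 bytes so small values
// stay in the LSM tree rather than the value log.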
func ChainBadgerOptions() badger.Options {
	opts := badger.DefaultOptions
	opts.GcInterval = 0 // disable GC for chain datastore
	opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
		WithValueThreshold(128)
	return opts
}
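
// fsDatastores maps datastore names to the constructors used to open them
// under the repo's datastore directory.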
var fsDatastores = map[string]dsCtor{
	"chain":    chainBadgerDs,
	"metadata": levelDs,

	// These need to be fast for large writes... but also need a really good GC :c
	"staging": badgerDs, // miner specific
"client": badgerDs, // client specific
}
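
// chainBadgerDs opens the Badger-backed chain datastore at the given path
// using ChainBadgerOptions.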
func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) {
	opts := ChainBadgerOptions()
	opts.ReadOnly = readonly
	return badger.NewDatastore(path, &opts)
}
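
// badgerDs opens a general-purpose Badger datastore at the given path with a
// 1 KiB value threshold.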
func badgerDs(path string, readonly bool) (datastore.Batching, error) {
	opts := badger.DefaultOptions
	opts.ReadOnly = readonly
	opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
		WithValueThreshold(1 << 10)
	return badger.NewDatastore(path, &opts)
}
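
// levelDs opens a LevelDB datastore at the given path with compression
// disabled and strict consistency checks enabled.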
func levelDs(path string, readonly bool) (datastore.Batching, error) {
	return levelds.NewDatastore(path, &levelds.Options{
		Compression: ldbopts.NoCompression,
		NoSync:      false,
		Strict:      ldbopts.StrictAll,
		ReadOnly:    readonly,
	})
}
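
// openDatastores ensures the repo's datastore directory exists, opens every
// datastore listed in fsDatastores, and wraps each one in go-ds-measure
// metrics. The returned map is keyed by the namespace key string (e.g. "/chain").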
func (fsr *fsLockedRepo) openDatastores(readonly bool) (map[string]datastore.Batching, error) {
	if err := os.MkdirAll(fsr.join(fsDatastore), 0755); err != nil {
		return nil, xerrors.Errorf("mkdir %s: %w", fsr.join(fsDatastore), err)
	}
	out := map[string]datastore.Batching{}
	for p, ctor := range fsDatastores {
		prefix := datastore.NewKey(p)

		// TODO: optimization: don't init datastores we don't need
		ds, err := ctor(fsr.join(filepath.Join(fsDatastore, p)), readonly)
		if err != nil {
			return nil, xerrors.Errorf("opening datastore %s: %w", prefix, err)
		}

		ds = measure.New("fsrepo."+p, ds)
		out[datastore.NewKey(p).String()] = ds
	}

	return out, nil
}
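
// Datastore returns the datastore registered under the given namespace key
// (e.g. "/metadata"), lazily opening all datastores on first call.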
func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) {
	fsr.dsOnce.Do(func() {
		fsr.ds, fsr.dsErr = fsr.openDatastores(fsr.readonly)
	})
	if fsr.dsErr != nil {
		return nil, fsr.dsErr
	}
	ds, ok := fsr.ds[ns]
	if ok {
		return ds, nil
	}
	return nil, xerrors.Errorf("no such datastore: %s", ns)
}
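
// Illustrative usage (a sketch, not part of the original file): a caller that
// already holds a locked repo can fetch a namespaced datastore by its key
// string. Note that openDatastores keys its map with
// datastore.NewKey(name).String(), so lookups carry a leading slash:
//
//	var lr LockedRepo // assumed to be obtained elsewhere, e.g. via (*FsRepo).Lock
//	mds, err := lr.Datastore("/metadata")
//	if err != nil {
//		return err
//	}
//	_ = mds // wrapped in go-ds-measure metrics under the "fsrepo.metadata" prefix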