backup: open datastores in readonly in offline mode

This commit is contained in:
Łukasz Magiera 2020-10-01 14:51:37 +02:00
parent deac7166b5
commit 9b5a0815fc
3 changed files with 24 additions and 8 deletions

View File

@ -60,7 +60,7 @@ func offlineBackup(cctx *cli.Context) error {
return xerrors.Errorf("repo at '%s' is not initialized", cctx.String(FlagMinerRepo)) return xerrors.Errorf("repo at '%s' is not initialized", cctx.String(FlagMinerRepo))
} }
lr, err := r.Lock(repo.StorageMiner) lr, err := r.LockRO(repo.StorageMiner)
if err != nil { if err != nil {
return xerrors.Errorf("locking repo: %w", err) return xerrors.Errorf("locking repo: %w", err)
} }

View File

@ -226,11 +226,23 @@ func (fsr *FsRepo) Lock(repoType RepoType) (LockedRepo, error) {
}, nil }, nil
} }
// LockRO behaves exactly like Lock, but marks the resulting repo so that
// its datastores are opened in read-only mode (used e.g. for offline backups).
func (fsr *FsRepo) LockRO(repoType RepoType) (LockedRepo, error) {
	locked, err := fsr.Lock(repoType)
	if err != nil {
		return nil, err
	}

	// Lock always returns a *fsLockedRepo for an FsRepo, so this
	// assertion cannot fail in practice; flip the read-only flag
	// before any datastore has been opened (they open lazily).
	locked.(*fsLockedRepo).readonly = true

	return locked, nil
}
type fsLockedRepo struct { type fsLockedRepo struct {
path string path string
configPath string configPath string
repoType RepoType repoType RepoType
closer io.Closer closer io.Closer
readonly bool
ds map[string]datastore.Batching ds map[string]datastore.Batching
dsErr error dsErr error

View File

@ -14,7 +14,7 @@ import (
ldbopts "github.com/syndtr/goleveldb/leveldb/opt" ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
) )
type dsCtor func(path string) (datastore.Batching, error) type dsCtor func(path string, readonly bool) (datastore.Batching, error)
var fsDatastores = map[string]dsCtor{ var fsDatastores = map[string]dsCtor{
"chain": chainBadgerDs, "chain": chainBadgerDs,
@ -26,9 +26,10 @@ var fsDatastores = map[string]dsCtor{
"client": badgerDs, // client specific "client": badgerDs, // client specific
} }
func chainBadgerDs(path string) (datastore.Batching, error) { func chainBadgerDs(path string, readonly bool) (datastore.Batching, error) {
opts := badger.DefaultOptions opts := badger.DefaultOptions
opts.GcInterval = 0 // disable GC for chain datastore opts.GcInterval = 0 // disable GC for chain datastore
opts.ReadOnly = readonly
opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
WithValueThreshold(1 << 10) WithValueThreshold(1 << 10)
@ -36,23 +37,26 @@ func chainBadgerDs(path string) (datastore.Batching, error) {
return badger.NewDatastore(path, &opts) return badger.NewDatastore(path, &opts)
} }
func badgerDs(path string) (datastore.Batching, error) { func badgerDs(path string, readonly bool) (datastore.Batching, error) {
opts := badger.DefaultOptions opts := badger.DefaultOptions
opts.ReadOnly = readonly
opts.Options = dgbadger.DefaultOptions("").WithTruncate(true). opts.Options = dgbadger.DefaultOptions("").WithTruncate(true).
WithValueThreshold(1 << 10) WithValueThreshold(1 << 10)
return badger.NewDatastore(path, &opts) return badger.NewDatastore(path, &opts)
} }
func levelDs(path string) (datastore.Batching, error) { func levelDs(path string, readonly bool) (datastore.Batching, error) {
return levelds.NewDatastore(path, &levelds.Options{ return levelds.NewDatastore(path, &levelds.Options{
Compression: ldbopts.NoCompression, Compression: ldbopts.NoCompression,
NoSync: false, NoSync: false,
Strict: ldbopts.StrictAll, Strict: ldbopts.StrictAll,
ReadOnly: readonly,
}) })
} }
func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error) { func (fsr *fsLockedRepo) openDatastores(readonly bool) (map[string]datastore.Batching, error) {
if err := os.MkdirAll(fsr.join(fsDatastore), 0755); err != nil { if err := os.MkdirAll(fsr.join(fsDatastore), 0755); err != nil {
return nil, xerrors.Errorf("mkdir %s: %w", fsr.join(fsDatastore), err) return nil, xerrors.Errorf("mkdir %s: %w", fsr.join(fsDatastore), err)
} }
@ -63,7 +67,7 @@ func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error)
prefix := datastore.NewKey(p) prefix := datastore.NewKey(p)
// TODO: optimization: don't init datastores we don't need // TODO: optimization: don't init datastores we don't need
ds, err := ctor(fsr.join(filepath.Join(fsDatastore, p))) ds, err := ctor(fsr.join(filepath.Join(fsDatastore, p)), readonly)
if err != nil { if err != nil {
return nil, xerrors.Errorf("opening datastore %s: %w", prefix, err) return nil, xerrors.Errorf("opening datastore %s: %w", prefix, err)
} }
@ -78,7 +82,7 @@ func (fsr *fsLockedRepo) openDatastores() (map[string]datastore.Batching, error)
func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) { func (fsr *fsLockedRepo) Datastore(ns string) (datastore.Batching, error) {
fsr.dsOnce.Do(func() { fsr.dsOnce.Do(func() {
fsr.ds, fsr.dsErr = fsr.openDatastores() fsr.ds, fsr.dsErr = fsr.openDatastores(fsr.readonly)
}) })
if fsr.dsErr != nil { if fsr.dsErr != nil {