package repo

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/BurntSushi/toml"

	"github.com/ipfs/go-datastore"
	fslock "github.com/ipfs/go-fs-lock"
	logging "github.com/ipfs/go-log/v2"
	"github.com/mitchellh/go-homedir"
	"github.com/multiformats/go-base32"
	"github.com/multiformats/go-multiaddr"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/blockstore"
	badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
	"github.com/filecoin-project/lotus/extern/sector-storage/stores"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/config"
)

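// Names of the well-known files and directories kept inside a repo directory.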
const (
	fsAPI           = "api"
	fsAPIToken      = "token"
	fsConfig        = "config.toml"
	fsStorageConfig = "storage.json"
	fsDatastore     = "datastore"
	fsLock          = "repo.lock"
	fsKeystore      = "keystore"
)

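// RepoType identifies the kind of node a repo belongs to (full node, miner,
// worker, wallet, markets) and selects the default config written on Init.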
type RepoType int

const (
	_                 = iota // Default is invalid
	FullNode RepoType = iota
	StorageMiner
	Worker
	Wallet
	Markets
)

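// defConfForType returns the default config value for the given repo type.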
func defConfForType(t RepoType) interface{} {
	switch t {
	case FullNode:
		return config.DefaultFullNode()
	case StorageMiner, Markets:
		// markets is a specialised miner service
		// this taxonomy needs to be cleaned up
		return config.DefaultStorageMiner()
	case Worker:
		return &struct{}{}
	case Wallet:
		return &struct{}{}
	default:
		panic(fmt.Sprintf("unknown RepoType(%d)", int(t)))
	}
}

var log = logging.Logger("repo")
|
|
|
|
|
2019-07-26 11:45:25 +00:00
|
|
|
var ErrRepoExists = xerrors.New("repo exists")
|
2019-07-10 17:09:57 +00:00
|
|
|
|
2019-07-10 13:35:00 +00:00
|
|
|
// FsRepo is struct for repo, use NewFS to create
|
2019-07-10 13:23:29 +00:00
|
|
|
type FsRepo struct {
|
2020-09-30 06:56:38 +00:00
|
|
|
path string
|
|
|
|
configPath string
|
2019-07-10 13:23:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
var _ Repo = &FsRepo{}
|
|
|
|
|
2019-07-10 15:14:29 +00:00
|
|
|
// NewFS creates a repo instance backed by the given filesystem path.
func NewFS(path string) (*FsRepo, error) {
	path, err := homedir.Expand(path)
	if err != nil {
		return nil, err
	}

	return &FsRepo{
		path:       path,
		configPath: filepath.Join(path, fsConfig),
	}, nil
}

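// SetConfigPath overrides the default config path (<repo>/config.toml) for this repo.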
func (fsr *FsRepo) SetConfigPath(cfgPath string) {
	fsr.configPath = cfgPath
}

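// Exists reports whether a repo has already been initialised at this path, by
// checking for the datastore or keystore directories.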
func (fsr *FsRepo) Exists() (bool, error) {
	_, err := os.Stat(filepath.Join(fsr.path, fsDatastore))
	notexist := os.IsNotExist(err)
	if notexist {
		err = nil

		_, err = os.Stat(filepath.Join(fsr.path, fsKeystore))
		notexist = os.IsNotExist(err)
		if notexist {
			err = nil
		}
	}
	return !notexist, err
}

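// Init initialises a new repo of the given type at this path: it creates the
// repo directory, writes the default config and creates the keystore. It is a
// no-op if the repo already exists.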
func (fsr *FsRepo) Init(t RepoType) error {
	exist, err := fsr.Exists()
	if err != nil {
		return err
	}
	if exist {
		return nil
	}

	log.Infof("Initializing repo at '%s'", fsr.path)
	err = os.MkdirAll(fsr.path, 0755) //nolint: gosec
	if err != nil && !os.IsExist(err) {
		return err
	}

	if err := fsr.initConfig(t); err != nil {
		return xerrors.Errorf("init config: %w", err)
	}

	return fsr.initKeystore()
}

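// initConfig writes a commented default config for the given repo type to the
// config path, unless a config file already exists there.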
func (fsr *FsRepo) initConfig(t RepoType) error {
	_, err := os.Stat(fsr.configPath)
	if err == nil {
		// exists
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}

	c, err := os.Create(fsr.configPath)
	if err != nil {
		return err
	}

	comm, err := config.ConfigComment(defConfForType(t))
	if err != nil {
		return xerrors.Errorf("comment: %w", err)
	}
	_, err = c.Write(comm)
	if err != nil {
		return xerrors.Errorf("write config: %w", err)
	}

	if err := c.Close(); err != nil {
		return xerrors.Errorf("close config: %w", err)
	}
	return nil
}

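// initKeystore creates the keystore directory with owner-only (0700) permissions.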
func (fsr *FsRepo) initKeystore() error {
	kstorePath := filepath.Join(fsr.path, fsKeystore)
	if _, err := os.Stat(kstorePath); err == nil {
		return ErrRepoExists
	} else if !os.IsNotExist(err) {
		return err
	}
	return os.Mkdir(kstorePath, 0700)
}

// APIEndpoint returns the API endpoint multiaddr stored in this repo.
func (fsr *FsRepo) APIEndpoint() (multiaddr.Multiaddr, error) {
	p := filepath.Join(fsr.path, fsAPI)

	f, err := os.Open(p)
	if os.IsNotExist(err) {
		return nil, ErrNoAPIEndpoint
	} else if err != nil {
		return nil, err
	}
	defer f.Close() //nolint: errcheck // Read only op

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, xerrors.Errorf("failed to read %q: %w", p, err)
	}
	strma := string(data)
	strma = strings.TrimSpace(strma)

	apima, err := multiaddr.NewMultiaddr(strma)
	if err != nil {
		return nil, err
	}
	return apima, nil
}

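// APIToken returns the API authentication token stored in the repo.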
func (fsr *FsRepo) APIToken() ([]byte, error) {
	p := filepath.Join(fsr.path, fsAPIToken)
	f, err := os.Open(p)

	if os.IsNotExist(err) {
		return nil, ErrNoAPIEndpoint
	} else if err != nil {
		return nil, err
	}
	defer f.Close() //nolint: errcheck // Read only op

	tb, err := ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	return bytes.TrimSpace(tb), nil
}

// Lock acquires an exclusive lock on this repo.
func (fsr *FsRepo) Lock(repoType RepoType) (LockedRepo, error) {
	locked, err := fslock.Locked(fsr.path, fsLock)
	if err != nil {
		return nil, xerrors.Errorf("could not check lock status: %w", err)
	}
	if locked {
		return nil, ErrRepoAlreadyLocked
	}

	closer, err := fslock.Lock(fsr.path, fsLock)
	if err != nil {
		return nil, xerrors.Errorf("could not lock the repo: %w", err)
	}
	return &fsLockedRepo{
		path:       fsr.path,
		configPath: fsr.configPath,
		repoType:   repoType,
		closer:     closer,
	}, nil
}

// LockRO is like Lock, except datastores will work in read-only mode.
func (fsr *FsRepo) LockRO(repoType RepoType) (LockedRepo, error) {
	lr, err := fsr.Lock(repoType)
	if err != nil {
		return nil, err
	}

	lr.(*fsLockedRepo).readonly = true
	return lr, nil
}

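// A typical repo lifecycle, as an illustrative sketch (error handling elided,
// and the path below is only an example):
//
//	r, _ := NewFS("~/.lotus")     // build a repo handle for a filesystem path
//	if ok, _ := r.Exists(); !ok {
//		_ = r.Init(FullNode)      // write the default config and keystore
//	}
//	lr, _ := r.Lock(FullNode)     // take repo.lock for exclusive access
//	defer lr.Close()              // release the lock and close open stores
//
// fsLockedRepo is the LockedRepo implementation returned by Lock and LockRO.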
type fsLockedRepo struct {
	path       string
	configPath string
	repoType   RepoType
	closer     io.Closer
	readonly   bool

	ds     map[string]datastore.Batching
	dsErr  error
	dsOnce sync.Once

	bs     blockstore.Blockstore
	bsErr  error
	bsOnce sync.Once
	ssPath string
	ssErr  error
	ssOnce sync.Once

	storageLk sync.Mutex
	configLk  sync.Mutex
}

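// Readonly reports whether the repo was locked in read-only mode (via LockRO).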
func (fsr *fsLockedRepo) Readonly() bool {
	return fsr.readonly
}

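// Path returns the filesystem path this locked repo was opened from.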
func (fsr *fsLockedRepo) Path() string {
	return fsr.path
}

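// Close removes the API endpoint file, closes any open datastores and the
// blockstore, and finally releases the repo lock.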
func (fsr *fsLockedRepo) Close() error {
	err := os.Remove(fsr.join(fsAPI))

	if err != nil && !os.IsNotExist(err) {
		return xerrors.Errorf("could not remove API file: %w", err)
	}
	if fsr.ds != nil {
		for _, ds := range fsr.ds {
			if err := ds.Close(); err != nil {
				return xerrors.Errorf("could not close datastore: %w", err)
			}
		}
	}

	// type assertion will return ok=false if fsr.bs is nil altogether.
	if c, ok := fsr.bs.(io.Closer); ok && c != nil {
		if err := c.Close(); err != nil {
			return xerrors.Errorf("could not close blockstore: %w", err)
		}
	}

	err = fsr.closer.Close()
	fsr.closer = nil
	return err
}

// Blockstore returns a blockstore for the provided data domain.
func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain) (blockstore.Blockstore, error) {
	if domain != UniversalBlockstore {
		return nil, ErrInvalidBlockstoreDomain
	}

	fsr.bsOnce.Do(func() {
		path := fsr.join(filepath.Join(fsDatastore, "chain"))
		readonly := fsr.readonly

		if err := os.MkdirAll(path, 0755); err != nil {
			fsr.bsErr = err
			return
		}

		opts, err := BadgerBlockstoreOptions(domain, path, readonly)
		if err != nil {
			fsr.bsErr = err
			return
		}

		//
		// Tri-state environment variable LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC
		// - unset == the default (currently fsync enabled)
		// - set with a false-y value == fsync enabled no matter what a future default is
		// - set with any other value == fsync is disabled regardless of the default (recommended for day-to-day use)
		//
		if nosyncBs, nosyncBsSet := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC"); nosyncBsSet {
			nosyncBs = strings.ToLower(nosyncBs)
			if nosyncBs == "" || nosyncBs == "0" || nosyncBs == "false" || nosyncBs == "no" {
				opts.SyncWrites = true
			} else {
				opts.SyncWrites = false
			}
		}

		bs, err := badgerbs.Open(opts)
		if err != nil {
			fsr.bsErr = err
			return
		}
		fsr.bs = blockstore.WrapIDStore(bs)
	})

	return fsr.bs, fsr.bsErr
}

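// SplitstorePath returns the path of the splitstore directory under the
// datastore dir, creating it on first use.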
func (fsr *fsLockedRepo) SplitstorePath() (string, error) {
	fsr.ssOnce.Do(func() {
		path := fsr.join(filepath.Join(fsDatastore, "splitstore"))

		if err := os.MkdirAll(path, 0755); err != nil {
			fsr.ssErr = err
			return
		}

		fsr.ssPath = path
	})

	return fsr.ssPath, fsr.ssErr
}

// join joins path elements with fsr.path
func (fsr *fsLockedRepo) join(paths ...string) string {
	return filepath.Join(append([]string{fsr.path}, paths...)...)
}

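// stillValid returns ErrClosedRepo once the locked repo has been closed.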
func (fsr *fsLockedRepo) stillValid() error {
	if fsr.closer == nil {
		return ErrClosedRepo
	}
	return nil
}

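// Config returns the repo config, loaded fresh from disk.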
func (fsr *fsLockedRepo) Config() (interface{}, error) {
	fsr.configLk.Lock()
	defer fsr.configLk.Unlock()

	return fsr.loadConfigFromDisk()
}

func (fsr *fsLockedRepo) loadConfigFromDisk() (interface{}, error) {
	return config.FromFile(fsr.configPath, defConfForType(fsr.repoType))
}

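// SetConfig updates the on-disk config: it loads the current config, applies
// the mutator c to the in-memory representation, and writes the result back
// as TOML.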
func (fsr *fsLockedRepo) SetConfig(c func(interface{})) error {
	if err := fsr.stillValid(); err != nil {
		return err
	}

	fsr.configLk.Lock()
	defer fsr.configLk.Unlock()

	cfg, err := fsr.loadConfigFromDisk()
	if err != nil {
		return err
	}

	// mutate in-memory representation of config
	c(cfg)

	// buffer into which we write TOML bytes
	buf := new(bytes.Buffer)

	// encode now-mutated config as TOML and write to buffer
	err = toml.NewEncoder(buf).Encode(cfg)
	if err != nil {
		return err
	}

	// write buffer of TOML bytes to config file
	err = ioutil.WriteFile(fsr.configPath, buf.Bytes(), 0644)
	if err != nil {
		return err
	}

	return nil
}

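// GetStorage reads the storage.json config from the repo.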
func (fsr *fsLockedRepo) GetStorage() (stores.StorageConfig, error) {
	fsr.storageLk.Lock()
	defer fsr.storageLk.Unlock()

	return fsr.getStorage(nil)
}

func (fsr *fsLockedRepo) getStorage(def *stores.StorageConfig) (stores.StorageConfig, error) {
	c, err := config.StorageFromFile(fsr.join(fsStorageConfig), def)
	if err != nil {
		return stores.StorageConfig{}, err
	}
	return *c, nil
}

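// SetStorage mutates the storage config via c and writes it back to storage.json.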
func (fsr *fsLockedRepo) SetStorage(c func(*stores.StorageConfig)) error {
	fsr.storageLk.Lock()
	defer fsr.storageLk.Unlock()

	sc, err := fsr.getStorage(&stores.StorageConfig{})
	if err != nil {
		return xerrors.Errorf("get storage: %w", err)
	}

	c(&sc)

	return config.WriteStorageFile(fsr.join(fsStorageConfig), sc)
}

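// Stat returns filesystem usage statistics for the given path.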
func (fsr *fsLockedRepo) Stat(path string) (fsutil.FsStat, error) {
	return fsutil.Statfs(path)
}

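// DiskUsage returns the on-disk size, in bytes, of the given path.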
func (fsr *fsLockedRepo) DiskUsage(path string) (int64, error) {
	si, err := fsutil.FileSize(path)
	if err != nil {
		return 0, err
	}
	return si.OnDisk, nil
}

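// SetAPIEndpoint writes the given API multiaddr to the api file in the repo.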
func (fsr *fsLockedRepo) SetAPIEndpoint(ma multiaddr.Multiaddr) error {
	if err := fsr.stillValid(); err != nil {
		return err
	}
	return ioutil.WriteFile(fsr.join(fsAPI), []byte(ma.String()), 0644)
}

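// SetAPIToken writes the given API token to the token file with 0600 permissions.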
func (fsr *fsLockedRepo) SetAPIToken(token []byte) error {
	if err := fsr.stillValid(); err != nil {
		return err
	}
	return ioutil.WriteFile(fsr.join(fsAPIToken), token, 0600)
}

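// KeyStore returns the locked repo itself, which implements types.KeyStore on
// top of the keystore directory.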
func (fsr *fsLockedRepo) KeyStore() (types.KeyStore, error) {
	if err := fsr.stillValid(); err != nil {
		return nil, err
	}
	return fsr, nil
}

var kstrPermissionMsg = "permissions of key: '%s' are too relaxed, " +
	"required: 0600, got: %#o"

// List lists all the keys stored in the KeyStore
func (fsr *fsLockedRepo) List() ([]string, error) {
	if err := fsr.stillValid(); err != nil {
		return nil, err
	}

	kstorePath := fsr.join(fsKeystore)
	dir, err := os.Open(kstorePath)
	if err != nil {
		return nil, xerrors.Errorf("opening dir to list keystore: %w", err)
	}
	defer dir.Close() //nolint:errcheck
	files, err := dir.Readdir(-1)
	if err != nil {
		return nil, xerrors.Errorf("reading keystore dir: %w", err)
	}
	keys := make([]string, 0, len(files))
	for _, f := range files {
		if f.Mode()&0077 != 0 {
			return nil, xerrors.Errorf(kstrPermissionMsg, f.Name(), f.Mode())
		}
		name, err := base32.RawStdEncoding.DecodeString(f.Name())
		if err != nil {
			return nil, xerrors.Errorf("decoding key: '%s': %w", f.Name(), err)
		}
		keys = append(keys, string(name))
	}
	return keys, nil
}

// Get gets a key out of the keystore and returns the types.KeyInfo corresponding to the named key
func (fsr *fsLockedRepo) Get(name string) (types.KeyInfo, error) {
	if err := fsr.stillValid(); err != nil {
		return types.KeyInfo{}, err
	}

	encName := base32.RawStdEncoding.EncodeToString([]byte(name))
	keyPath := fsr.join(fsKeystore, encName)

	fstat, err := os.Stat(keyPath)
	if os.IsNotExist(err) {
		return types.KeyInfo{}, xerrors.Errorf("opening key '%s': %w", name, types.ErrKeyInfoNotFound)
	} else if err != nil {
		return types.KeyInfo{}, xerrors.Errorf("opening key '%s': %w", name, err)
	}

	if fstat.Mode()&0077 != 0 {
		return types.KeyInfo{}, xerrors.Errorf(kstrPermissionMsg, name, fstat.Mode())
	}

	file, err := os.Open(keyPath)
	if err != nil {
		return types.KeyInfo{}, xerrors.Errorf("opening key '%s': %w", name, err)
	}
	defer file.Close() //nolint: errcheck // read only op

	data, err := ioutil.ReadAll(file)
	if err != nil {
		return types.KeyInfo{}, xerrors.Errorf("reading key '%s': %w", name, err)
	}

	var res types.KeyInfo
	err = json.Unmarshal(data, &res)
	if err != nil {
		return types.KeyInfo{}, xerrors.Errorf("decoding key '%s': %w", name, err)
	}

	return res, nil
}

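// KTrashPrefix is the name prefix for keys that have been moved to the trash;
// when a trash-prefixed name already exists, put retries with a numeric suffix.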
const KTrashPrefix = "trash-"
|
|
|
|
|
2019-07-18 14:01:39 +00:00
|
|
|
// Put saves key info under the given name
func (fsr *fsLockedRepo) Put(name string, info types.KeyInfo) error {
	return fsr.put(name, info, 0)
}

func (fsr *fsLockedRepo) put(rawName string, info types.KeyInfo, retries int) error {
	if err := fsr.stillValid(); err != nil {
		return err
	}

	name := rawName
	if retries > 0 {
		name = fmt.Sprintf("%s-%d", rawName, retries)
	}

	encName := base32.RawStdEncoding.EncodeToString([]byte(name))
	keyPath := fsr.join(fsKeystore, encName)

	_, err := os.Stat(keyPath)
	if err == nil && strings.HasPrefix(name, KTrashPrefix) {
		// retry writing the trash-prefixed file with a number suffix
		return fsr.put(rawName, info, retries+1)
	} else if err == nil {
		return xerrors.Errorf("checking key before put '%s': %w", name, types.ErrKeyExists)
	} else if !os.IsNotExist(err) {
		return xerrors.Errorf("checking key before put '%s': %w", name, err)
	}

	keyData, err := json.Marshal(info)
	if err != nil {
		return xerrors.Errorf("encoding key '%s': %w", name, err)
	}

	err = ioutil.WriteFile(keyPath, keyData, 0600)
	if err != nil {
		return xerrors.Errorf("writing key '%s': %w", name, err)
	}
	return nil
}

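// Delete removes the named key from the keystore.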
func (fsr *fsLockedRepo) Delete(name string) error {
	if err := fsr.stillValid(); err != nil {
		return err
	}

	encName := base32.RawStdEncoding.EncodeToString([]byte(name))
	keyPath := fsr.join(fsKeystore, encName)

	_, err := os.Stat(keyPath)
	if os.IsNotExist(err) {
		return xerrors.Errorf("checking key before delete '%s': %w", name, types.ErrKeyInfoNotFound)
	} else if err != nil {
		return xerrors.Errorf("checking key before delete '%s': %w", name, err)
	}

	err = os.Remove(keyPath)
	if err != nil {
		return xerrors.Errorf("deleting key '%s': %w", name, err)
	}
	return nil
}