Merge branch 'master' into feat/replace-multistore-carv2

This commit is contained in:
Raúl Kripalani 2021-07-27 22:23:28 +01:00
commit 7c2341b43c
2 changed files with 213 additions and 20 deletions

View File

@@ -2,6 +2,18 @@ package main
import (
"fmt"
"os"
"path"
levelds "github.com/ipfs/go-ds-leveldb"
ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/filecoin-project/lotus/lib/backupds"
"github.com/filecoin-project/lotus/node/repo"
"github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log/v2"
lcli "github.com/filecoin-project/lotus/cli"
@@ -18,6 +30,8 @@ var marketCmd = &cli.Command{
Flags: []cli.Flag{},
Subcommands: []*cli.Command{
marketDealFeesCmd,
marketExportDatastoreCmd,
marketImportDatastoreCmd,
},
}
@@ -100,3 +114,196 @@ var marketDealFeesCmd = &cli.Command{
return xerrors.New("must provide either --provider or --dealId flag")
},
}
const mktsMetadataNamespace = "metadata"
var marketExportDatastoreCmd = &cli.Command{
Name: "export-datastore",
Description: "export markets datastore key/values to a file",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Usage: "path to the repo",
},
&cli.StringFlag{
Name: "backup-dir",
Usage: "path to the backup directory",
},
},
Action: func(cctx *cli.Context) error {
logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
// If the backup dir is not specified, just use the OS temp dir
backupDir := cctx.String("backup-dir")
if backupDir == "" {
backupDir = os.TempDir()
}
// Open the repo at the repo path
repoPath := cctx.String("repo")
lr, err := openLockedRepo(repoPath)
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
// Open the metadata datastore on the repo
ds, err := lr.Datastore(cctx.Context, datastore.NewKey(mktsMetadataNamespace).String())
if err != nil {
return xerrors.Errorf("opening datastore %s on repo %s: %w", mktsMetadataNamespace, repoPath, err)
}
// Create a tmp datastore that we'll add the exported key / values to
// and then backup
backupDsDir := path.Join(backupDir, "markets-backup-datastore")
if err := os.MkdirAll(backupDsDir, 0775); err != nil { //nolint:gosec
return xerrors.Errorf("creating tmp datastore directory: %w", err)
}
defer os.RemoveAll(backupDsDir) //nolint:errcheck
backupDs, err := levelds.NewDatastore(backupDsDir, &levelds.Options{
Compression: ldbopts.NoCompression,
NoSync: false,
Strict: ldbopts.StrictAll,
ReadOnly: false,
})
if err != nil {
return xerrors.Errorf("opening backup datastore at %s: %w", backupDir, err)
}
// Export the key / values
prefixes := []string{
"/deals/provider",
"/retrievals/provider",
"/storagemarket",
}
for _, prefix := range prefixes {
err := exportPrefix(prefix, ds, backupDs)
if err != nil {
return err
}
}
// Wrap the datastore in a backup datastore
bds, err := backupds.Wrap(backupDs, "")
if err != nil {
return xerrors.Errorf("opening backupds: %w", err)
}
// Create a file for the backup
fpath := path.Join(backupDir, "markets.datastore.backup")
out, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return xerrors.Errorf("opening backup file %s: %w", fpath, err)
}
// Write the backup to the file
if err := bds.Backup(out); err != nil {
if cerr := out.Close(); cerr != nil {
log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err)
}
return xerrors.Errorf("backup error: %w", err)
}
if err := out.Close(); err != nil {
return xerrors.Errorf("closing backup file: %w", err)
}
fmt.Println("Wrote backup file to " + fpath)
return nil
},
}
func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batching) error {
q, err := ds.Query(dsq.Query{
Prefix: prefix,
})
if err != nil {
return xerrors.Errorf("datastore query: %w", err)
}
defer q.Close() //nolint:errcheck
for res := range q.Next() {
fmt.Println("Exporting key " + res.Key)
err := backupDs.Put(datastore.NewKey(res.Key), res.Value)
if err != nil {
return xerrors.Errorf("putting %s to backup datastore: %w", res.Key, err)
}
}
return nil
}
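For illustration only (not part of this commit): a minimal sketch of how exportPrefix copies a keyspace from one datastore into another. It assumes the same context-free go-datastore API used above, and substitutes in-memory MapDatastores (via dssync "github.com/ipfs/go-datastore/sync") for the repo's metadata store and the LevelDB backup store.
func exportPrefixExample() error {
	// In-memory stand-ins for the repo's metadata datastore and the
	// temporary LevelDB backup datastore used by export-datastore.
	src := dssync.MutexWrap(datastore.NewMapDatastore())
	dst := dssync.MutexWrap(datastore.NewMapDatastore())

	// Seed a key under one of the exported prefixes.
	if err := src.Put(datastore.NewKey("/deals/provider/example"), []byte("value")); err != nil {
		return err
	}

	// Copy everything under /deals/provider into the backup datastore.
	if err := exportPrefix("/deals/provider", src, dst); err != nil {
		return err
	}

	// The key should now be readable from the backup datastore.
	_, err := dst.Get(datastore.NewKey("/deals/provider/example"))
	return err
}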
var marketImportDatastoreCmd = &cli.Command{
Name: "import-datastore",
Description: "import markets datastore key/values from a backup file",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Usage: "path to the repo",
},
&cli.StringFlag{
Name: "backup-path",
Usage: "path to the backup file",
Required: true,
},
},
Action: func(cctx *cli.Context) error {
logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
backupPath := cctx.String("backup-path")
// Open the repo at the repo path
lr, err := openLockedRepo(cctx.String("repo"))
if err != nil {
return err
}
defer lr.Close() //nolint:errcheck
// Open the metadata datastore on the repo
repoDs, err := lr.Datastore(cctx.Context, datastore.NewKey(mktsMetadataNamespace).String())
if err != nil {
return err
}
r, err := os.Open(backupPath)
if err != nil {
return xerrors.Errorf("opening backup path %s: %w", backupPath, err)
}
defer r.Close() //nolint:errcheck
fmt.Println("Importing from backup file " + backupPath)
err = backupds.RestoreInto(r, repoDs)
if err != nil {
return xerrors.Errorf("restoring backup from path %s: %w", backupPath, err)
}
fmt.Println("Completed importing from backup file " + backupPath)
return nil
},
}
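For illustration only (not part of this commit): a minimal sketch of the backupds round trip that export-datastore and import-datastore rely on, assuming in-memory MapDatastores (dssync.MutexWrap) and a bytes.Buffer in place of the on-disk LevelDB store and the markets.datastore.backup file.
func backupRoundTripExample() error {
	// Stand-in for the temporary LevelDB datastore populated by export-datastore.
	src := dssync.MutexWrap(datastore.NewMapDatastore())
	if err := src.Put(datastore.NewKey("/storagemarket/example"), []byte("value")); err != nil {
		return err
	}

	// Serialize the datastore with backupds, as the export command does,
	// writing to an in-memory buffer instead of a backup file.
	bds, err := backupds.Wrap(src, "")
	if err != nil {
		return err
	}
	var buf bytes.Buffer
	if err := bds.Backup(&buf); err != nil {
		return err
	}

	// Restore the serialized key/values into a fresh datastore, as the
	// import command does with the miner's metadata datastore.
	dst := dssync.MutexWrap(datastore.NewMapDatastore())
	if err := backupds.RestoreInto(&buf, dst); err != nil {
		return err
	}

	_, err = dst.Get(datastore.NewKey("/storagemarket/example"))
	return err
}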
func openLockedRepo(path string) (repo.LockedRepo, error) {
// Open the repo at the repo path
rpo, err := repo.NewFS(path)
if err != nil {
return nil, xerrors.Errorf("could not open repo %s: %w", path, err)
}
// Make sure the repo exists
exists, err := rpo.Exists()
if err != nil {
return nil, xerrors.Errorf("checking repo %s exists: %w", path, err)
}
if !exists {
return nil, xerrors.Errorf("repo does not exist: %s", path)
}
// Lock the repo
lr, err := rpo.Lock(repo.StorageMiner)
if err != nil {
return nil, xerrors.Errorf("locking repo %s: %w", path, err)
}
return lr, nil
}
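For illustration only (not part of this commit): a minimal sketch of how the two commands above use openLockedRepo, assuming a hypothetical repo path and a context.Context supplied by the caller.
func lockedRepoExample(ctx context.Context) error {
	// Hypothetical repo path; the commands above take it from the --repo flag.
	lr, err := openLockedRepo("~/.lotusminer")
	if err != nil {
		return err
	}
	defer lr.Close() //nolint:errcheck

	// Fetch the markets metadata namespace, as both commands do.
	mds, err := lr.Datastore(ctx, datastore.NewKey(mktsMetadataNamespace).String())
	if err != nil {
		return err
	}

	// mds can now be queried, exported (see exportPrefix above) or restored into.
	_ = mds
	return nil
}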

View File

@@ -63,24 +63,15 @@ Testing an RC:
- [ ] (optional) let a sector go faulty, and see it be recovered
- [ ] **Stage 2 - Community Testing**
- [ ] Inform beta miners (@lotus-early-testers-miner in Filecoin Slack #fil-lotus)
- [ ] Ask close ecosystem partners to test their projects (@lotus-early-testers-eco-dev in Filecoin slack #fil-lotus)
- [ ] Powergate
- [ ] Glif
- [ ] Zondax
- [ ] Stats dashboard
- [ ] Community dashboards
- [ ] Infura
- [ ] Sentinel
- [ ] Protofire
- [ ] Fleek
- [ ] Inform beta lotus users (@lotus-early-testers in Filecoin Slack #fil-lotus)
- [ ] **Stage 3 - Community Prod Testing**
- [ ] Documentation
- [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
- [ ] Check if any [config](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#configuration) updates are needed
- [ ] Invite the wider community through (link to the release issue):
- [ ] Check `Create a discussion for this release` when tagging for the major rcs(new features, hot-fixes) release
- [ ] Check `Create a discussion for this release` when tagging for the major/close-to-final rcs(new features, hot-fixes) release
- [ ] Link the discussion in #fil-lotus on Filecoin Slack
- [ ] **Stage 4 - Release**
@@ -91,11 +82,10 @@ Testing an RC:
- [ ] Merge `release-vX.Y.Z` into the `releases` branch.
- [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).
- [ ] Check `Create a discussion for this release` when tagging the release
- [ ] Final announcements
- [ ] Update network.filecoin.io for mainnet, calib and nerpa.
- [ ] repost in #fil-lotus in filecoin slack
- [ ] Inform node provides (Protofire, Digital Ocean..)
- [ ] repost in #fil-lotus-announcement in filecoin slack
- [ ] Inform node providers (Protofire, Digital Ocean..)
- [ ] **Post-Release**
- [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so!
@@ -104,11 +94,7 @@ Testing an RC:
## ❤️ Contributors
< list generated by scripts/mkreleaselog >
Would you like to contribute to Lotus and don't know how? Well, there are a few places you can get started:
- TODO
See the final release notes!
## ⁉️ Do you have questions?