lp config after testing

Andrew Jackson (Ajax) 2023-09-26 22:06:00 -05:00
parent aae259c426
commit 545329311f
6 changed files with 254 additions and 29 deletions

base.toml (new file, +190 lines)

@@ -0,0 +1,190 @@
[Subsystems]
# type: bool
#EnableWindowPost = false
# type: bool
#EnableWinningPost = false
[Fees]
# type: types.FIL
#DefaultMaxFee = "0.07 FIL"
# type: types.FIL
#MaxPreCommitGasFee = "0.025 FIL"
# type: types.FIL
#MaxCommitGasFee = "0.05 FIL"
# type: types.FIL
#MaxTerminateGasFee = "0.5 FIL"
# WindowPoSt is a high-value operation, so the default fee should be high.
#
# type: types.FIL
#MaxWindowPoStGasFee = "5 FIL"
# type: types.FIL
#MaxPublishDealsFee = "0.05 FIL"
[Fees.MaxPreCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.02 FIL"
[Fees.MaxCommitBatchGasFee]
# type: types.FIL
#Base = "0 FIL"
# type: types.FIL
#PerSector = "0.03 FIL"
[Addresses]
# Addresses to send PreCommit messages from
#
# type: []string
#PreCommitControl = []
# Addresses to send Commit messages from
#
# type: []string
#CommitControl = []
# type: []string
#TerminateControl = []
# DisableOwnerFallback disables usage of the owner address for messages
# sent automatically
#
# type: bool
#DisableOwnerFallback = false
# DisableWorkerFallback disables usage of the worker address for messages
# sent automatically, if control addresses are configured.
# A control address that doesn't have enough funds will still be chosen
# over the worker address if this flag is set.
#
# type: bool
#DisableWorkerFallback = false
[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
#
# WARNING: Setting this value too high may make the node crash by running out of stack
# WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
# to late submission.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: int
#ParallelCheckLimit = 0
# Maximum amount of time a proving pre-check can take for a sector. If the check times out, the sector will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
# blocked (e.g. in case of disconnected NFS mount)
#
# type: Duration
#SingleCheckTimeout = "0s"
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
# the partition which didn't get checked on time will be skipped
#
# WARNING: Setting this value too low risks sectors being skipped even though they are accessible, simply because
# reading the test challenge took longer than this timeout
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
# blocked or slow
#
# type: Duration
#PartitionCheckTimeout = "0s"
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
#
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL, resulting in faulty sectors which will need
# to be recovered. Before enabling this option, make sure your PoSt workers work correctly.
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableBuiltinWindowPoSt = false
# Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present.
#
# WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL, resulting in lost block rewards.
# Before enabling this option, make sure your PoSt workers work correctly.
#
# type: bool
#DisableBuiltinWinningPoSt = false
# Disable WindowPoSt provable sector readability checks.
#
# In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
# from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
# we're only interested in checking that sector data can be read.
#
# When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt set to false), this process
# can save a lot of time and compute resources when some sectors are not readable - this is because the builtin
# logic does not skip snark computation when some sectors need to be skipped.
#
# When using PoSt workers, this process is mostly redundant; with PoSt workers, challenges will be read once, and
# if challenges for some sectors aren't readable, those sectors will just get skipped.
#
# Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
# time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
# be negligible.
#
# NOTE: It is likely a bad idea to disable sector pre-checks in setups with no PoSt workers.
#
# NOTE: Even when this option is enabled, recovering sectors will be checked before the recovery declaration message
# is sent to the chain
#
# After changing this option, confirm that the new value works in your setup by invoking
# 'lotus-miner proving compute window-post 0'
#
# type: bool
#DisableWDPoStPreChecks = false
# Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (10 in nv16)
#
# A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
#
# The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which
# means that a single message can prove at most 10 partitions
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# Setting this value above the network limit has no effect
#
# type: int
#MaxPartitionsPerPoStMessage = 0
# In some cases when submitting DeclareFaultsRecovered messages,
# there may be too many recoveries to fit in a BlockGasLimit.
# In those cases it may be necessary to set this value to something low (e.g. 1).
# Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
# resulting in more total gas use (but each message will have a lower gas limit)
#
# type: int
#MaxPartitionsPerRecoveryMessage = 0
# Enable single partition per PoSt Message for partitions containing recovery sectors
#
# In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
# too high to fit in the block gas limit. In those cases, it becomes useful to send a PoSt message containing only
# the single partition with recovering sectors.
#
# Note that setting this value lower may result in less efficient gas use - more messages will be sent
# to prove each deadline, resulting in more total gas use (but each message will have a lower gas limit)
#
# type: bool
#SingleRecoveringPartitionPerPostMessage = false
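
Every key above ships commented out: the file documents the compiled-in defaults without pinning them, and a layer overrides a default only for the keys it actually uncomments. Layers are resolved by decoding each one in turn into the same struct (see getConfig further down). A minimal sketch of that resolution order, using an illustrative two-field struct rather than the real config.LotusProviderConfig:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

// Illustrative stand-in for config.LotusProviderConfig.
type Config struct {
    Subsystems struct {
        EnableWindowPost  bool
        EnableWinningPost bool
    }
}

func main() {
    var cfg Config // zero values stand in for the commented-out defaults
    layers := []string{
        "[Subsystems]\n#EnableWindowPost = false", // base: everything commented, nothing set
        "[Subsystems]\nEnableWindowPost = true",   // a later layer uncomments and flips one knob
    }
    for _, layer := range layers {
        // toml.Decode only writes keys present in this layer; everything
        // else keeps the value it had from earlier layers or the default.
        if _, err := toml.Decode(layer, &cfg); err != nil {
            panic(err)
        }
    }
    fmt.Printf("%+v\n", cfg) // EnableWindowPost:true, EnableWinningPost:false
}

Later layers win key by key; keys absent from every layer fall through to the default.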

@@ -16,7 +16,6 @@ import (
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
- logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/mitchellh/go-homedir"
@@ -122,6 +121,33 @@ var initCmd = &cli.Command{
Name: "from",
Usage: "select which address to send actor creation message from",
},
+ &cli.StringFlag{
+ Name: "db-host",
+ EnvVars: []string{"LOTUS_DB_HOST"},
+ Usage: "Comma-separated list of hostnames for yugabyte cluster",
+ Value: "yugabyte",
+ },
+ &cli.StringFlag{
+ Name: "db-name",
+ EnvVars: []string{"LOTUS_DB_NAME"},
+ Value: "yugabyte",
+ },
+ &cli.StringFlag{
+ Name: "db-user",
+ EnvVars: []string{"LOTUS_DB_USER"},
+ Value: "yugabyte",
+ },
+ &cli.StringFlag{
+ Name: "db-password",
+ EnvVars: []string{"LOTUS_DB_PASSWORD"},
+ Value: "yugabyte",
+ },
+ &cli.StringFlag{
+ Name: "db-port",
+ EnvVars: []string{"LOTUS_DB_PORT"},
+ Hidden: true,
+ Value: "5433",
+ },
},
Subcommands: []*cli.Command{
restoreCmd,
@@ -466,8 +492,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
// TODO: run sector index init only for devnets. This is not needed for longer running networks
- harmonyDB, err := harmonydb.New([]string{"127.0.0.1"}, "yugabyte", "yugabyte", "yugabyte", "5433", "",
-   func(s string) { logging.Logger("harmonydb").Error(s) })
+ harmonyDB, err := harmonydb.New([]string{cctx.String("db-host")}, cctx.String("db-name"), cctx.String("db-user"), cctx.String("db-password"), cctx.String("db-port"), "")
if err != nil {
return err
}
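
The init flow thus swaps the hard-coded 127.0.0.1 connection for the new db-* flags. For orientation, a rough stand-alone equivalent of the connection those defaults describe, written directly against pgxpool (pgx/v5 here; the actual client wiring lives in harmonydb, so treat the connString form as an assumption). Port 5433 is YugabyteDB's YSQL port, which speaks the PostgreSQL wire protocol:

package main

import (
    "context"

    "github.com/jackc/pgx/v5/pgxpool"
)

func main() {
    // Mirrors the flag defaults: db-host, db-name, db-user, db-password all
    // "yugabyte", db-port 5433 (YugabyteDB's PostgreSQL-compatible port).
    connString := "host=yugabyte port=5433 user=yugabyte password=yugabyte dbname=yugabyte"
    pool, err := pgxpool.New(context.Background(), connString)
    if err != nil {
        panic(err)
    }
    defer pool.Close()
    // A ping confirms the cluster is reachable before init proceeds.
    if err := pool.Ping(context.Background()); err != nil {
        panic(err)
    }
}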

@@ -2,6 +2,7 @@ package main
import (
"context"
+ "database/sql"
"errors"
"fmt"
"os"
@@ -9,7 +10,6 @@ import (
"strings"
"github.com/BurntSushi/toml"
- "github.com/kr/pretty"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
@@ -80,7 +80,7 @@ var configSetCmd = &cli.Command{
name := strings.Split(fn, ".")[0]
_, err = db.Exec(context.Background(),
- `INSERT INTO harmony_config (title, config) VALUES (?,?)
+ `INSERT INTO harmony_config (title, config) VALUES ($1, $2)
ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, string(bytes))
if err != nil {
return fmt.Errorf("unable to save config layer: %w", err)
@@ -106,7 +106,7 @@ var configGetCmd = &cli.Command{
}
var cfg string
- err = db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=?`, args.First()).Scan(&cfg)
+ err = db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, args.First()).Scan(&cfg)
if err != nil {
return err
}
@@ -126,7 +126,7 @@ var configListCmd = &cli.Command{
return err
}
var res []string
- err = db.Select(context.Background(), &res, `SELECT title FROM harmony_confg ORDER BY title`)
+ err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`)
if err != nil {
return fmt.Errorf("unable to read from db: %w", err)
}
@@ -140,7 +140,7 @@ var configListCmd = &cli.Command{
var configViewCmd = &cli.Command{
Name: "view",
- Usage: "View stacked config layers as it will be interpreted.",
+ Usage: "View stacked config layers as it will be interpreted by this version of lotus-provider.",
ArgsUsage: "a list of layers to be interpreted as the final config",
Action: func(cctx *cli.Context) error {
db, err := makeDB(cctx)
@@ -152,9 +152,9 @@ var configViewCmd = &cli.Command{
return err
}
- fmt.Println(pretty.Sprint(lp))
- return nil
+ e := toml.NewEncoder(os.Stdout)
+ e.Indent = "  "
+ return e.Encode(lp)
},
}
@@ -163,9 +163,12 @@ func getConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig
have := []string{}
for _, layer := range regexp.MustCompile("[ |,]").Split(cctx.String("layers"), -1) {
text := ""
- err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=?`, layer).Scan(&text)
+ err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
if err != nil {
- return nil, fmt.Errorf("could not read layer %s: %w", layer, err)
+ if strings.Contains(err.Error(), sql.ErrNoRows.Error()) {
+ return nil, fmt.Errorf("missing layer '%s' ", layer)
+ }
+ return nil, fmt.Errorf("could not read layer '%s': %w", layer, err)
}
meta, err := toml.Decode(text, &lp)
if err != nil {
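
One wrinkle: matching sql.ErrNoRows by string suggests the error arrives flattened through harmonydb rather than as a typed value. If the raw pgx error were surfaced, errors.Is would be the more idiomatic check; a sketch under that assumption (readLayer is illustrative, written directly against pgxpool):

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/jackc/pgx/v5"
    "github.com/jackc/pgx/v5/pgxpool"
)

// readLayer fetches one layer's TOML, distinguishing "no such layer" from
// genuine query failures.
func readLayer(ctx context.Context, pool *pgxpool.Pool, layer string) (string, error) {
    var text string
    err := pool.QueryRow(ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text)
    if errors.Is(err, pgx.ErrNoRows) {
        return "", fmt.Errorf("missing layer '%s'", layer)
    }
    if err != nil {
        return "", fmt.Errorf("could not read layer '%s': %w", layer, err)
    }
    return text, nil
}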

@@ -112,6 +112,11 @@ func main() {
EnvVars: []string{"LOTUS_LAYERS"},
Value: "base",
},
+ &cli.StringFlag{
+ Name: FlagRepoPath,
+ EnvVars: []string{"LOTUS_REPO_PATH"},
+ Value: "~/.lotus",
+ },
cliutil.FlagVeryVerbose,
},
Commands: append(local, lcli.CommonCommands...),
@@ -121,7 +126,7 @@ func main() {
After: func(c *cli.Context) error {
if r := recover(); r != nil {
// Generate report in LOTUS_PATH and re-raise panic
- build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagProviderRepo), c.App.Name)
+ build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagRepoPath), c.App.Name)
panic(r)
}
return nil
@@ -133,5 +138,5 @@ func main() {
}
const (
- FlagProviderRepo = "provider-repo"
+ FlagRepoPath = "repo-path"
)

@@ -87,7 +87,7 @@ var runCmd = &cli.Command{
// Open repo
- repoPath := cctx.String(FlagProviderRepo)
+ repoPath := cctx.String(FlagRepoPath)
fmt.Println("repopath", repoPath)
r, err := repo.NewFS(repoPath)
if err != nil {

@@ -33,7 +33,6 @@ type DB struct {
cfg *pgxpool.Config
schema string
hostnames []string
- log func(string)
}
var logger = logging.Logger("harmonydb")
@@ -50,7 +49,6 @@ func NewFromConfig(cfg config.HarmonyDB) (*DB, error) {
cfg.Database,
cfg.Port,
"",
- func(s string) { logger.Error(s) },
)
}
@@ -63,7 +61,6 @@ func NewFromConfigWithITestID(cfg config.HarmonyDB) func(id ITestID) (*DB, error
cfg.Database,
cfg.Port,
id,
- func(s string) { logger.Error(s) },
)
}
}
@@ -71,7 +68,7 @@ func NewFromConfigWithITestID(cfg config.HarmonyDB) func(id ITestID) (*DB, error
// New is to be called once per binary to establish the pool.
// log() is for errors. It returns an upgraded database's connection.
// This entry point serves both production and integration tests, so it's more DI.
- func New(hosts []string, username, password, database, port string, itestID ITestID, log func(string)) (*DB, error) {
+ func New(hosts []string, username, password, database, port string, itestID ITestID) (*DB, error) {
itest := string(itestID)
connString := ""
if len(hosts) > 0 {
@@ -102,11 +99,11 @@ func New(hosts []string, username, password, database, port string, itestID ITes
}
cfg.ConnConfig.OnNotice = func(conn *pgconn.PgConn, n *pgconn.Notice) {
- log("database notice: " + n.Message + ": " + n.Detail)
+ logger.Debug("database notice: " + n.Message + ": " + n.Detail)
DBMeasures.Errors.M(1)
}
- db := DB{cfg: cfg, schema: schema, hostnames: hosts, log: log} // pgx populated in AddStatsAndConnect
+ db := DB{cfg: cfg, schema: schema, hostnames: hosts} // pgx populated in AddStatsAndConnect
if err := db.addStatsAndConnect(); err != nil {
return nil, err
}
@@ -175,7 +172,7 @@ func (db *DB) addStatsAndConnect() error {
var err error
db.pgx, err = pgxpool.NewWithConfig(context.Background(), db.cfg)
if err != nil {
- db.log(fmt.Sprintf("Unable to connect to database: %v\n", err))
+ logger.Error(fmt.Sprintf("Unable to connect to database: %v\n", err))
return err
}
return nil
@@ -229,7 +226,7 @@ func (db *DB) upgrade() error {
applied TIMESTAMP DEFAULT current_timestamp
)`)
if err != nil {
- db.log("Upgrade failed.")
+ logger.Error("Upgrade failed.")
return err
}
@@ -240,7 +237,7 @@ func (db *DB) upgrade() error {
var landedEntries []struct{ Entry string }
err = db.Select(context.Background(), &landedEntries, "SELECT entry FROM base")
if err != nil {
- db.log("Cannot read entries: " + err.Error())
+ logger.Error("Cannot read entries: " + err.Error())
return err
}
for _, l := range landedEntries {
@@ -249,18 +246,23 @@ func (db *DB) upgrade() error {
}
dir, err := fs.ReadDir("sql")
if err != nil {
- db.log("Cannot read fs entries: " + err.Error())
+ logger.Error("Cannot read fs entries: " + err.Error())
return err
}
sort.Slice(dir, func(i, j int) bool { return dir[i].Name() < dir[j].Name() })
+ if len(dir) == 0 {
+ logger.Error("No sql files found.")
+ }
for _, e := range dir {
name := e.Name()
if landed[name] || !strings.HasSuffix(name, ".sql") {
+ logger.Debug("DB Schema " + name + " already applied.")
continue
}
file, err := fs.ReadFile("sql/" + name)
if err != nil {
- db.log("weird embed file read err")
+ logger.Error("weird embed file read err")
return err
}
for _, s := range strings.Split(string(file), ";") { // Implement the changes.
@@ -270,7 +272,7 @@ func (db *DB) upgrade() error {
_, err = db.pgx.Exec(context.Background(), s)
if err != nil {
msg := fmt.Sprintf("Could not upgrade! File %s, Query: %s, Returned: %s", name, s, err.Error())
- db.log(msg)
+ logger.Error(msg)
return errors.New(msg) // makes devs lives easier by placing message at the end.
}
}
@@ -278,7 +280,7 @@ func (db *DB) upgrade() error {
// Mark Completed.
_, err = db.Exec(context.Background(), "INSERT INTO base (entry) VALUES ($1)", name)
if err != nil {
- db.log("Cannot update base: " + err.Error())
+ logger.Error("Cannot update base: " + err.Error())
return fmt.Errorf("cannot insert into base: %w", err)
}
}
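
Stepping back, upgrade() is a small embedded-migration runner: it ensures a base bookkeeping table exists, reads the .sql files embedded under sql/, applies them in lexical order with each file split on ';', and records applied filenames so re-runs are no-ops. A condensed, logging-free sketch of that loop (migrate and its pool parameter are illustrative, not the commit's API):

package main

import (
    "context"
    "embed"
    "sort"
    "strings"

    "github.com/jackc/pgx/v5/pgxpool"
)

//go:embed sql
var sqlFiles embed.FS

func migrate(ctx context.Context, pool *pgxpool.Pool) error {
    dir, err := sqlFiles.ReadDir("sql")
    if err != nil {
        return err
    }
    // Lexical order makes numbered filenames apply in sequence.
    sort.Slice(dir, func(i, j int) bool { return dir[i].Name() < dir[j].Name() })
    for _, e := range dir {
        name := e.Name()
        if !strings.HasSuffix(name, ".sql") {
            continue
        }
        var done bool
        // Skip files already recorded in the bookkeeping table.
        if err := pool.QueryRow(ctx,
            `SELECT EXISTS(SELECT 1 FROM base WHERE entry=$1)`, name).Scan(&done); err != nil {
            return err
        }
        if done {
            continue
        }
        file, err := sqlFiles.ReadFile("sql/" + name)
        if err != nil {
            return err
        }
        // Naive ';' split: fine for plain DDL, would break on ';' inside string literals.
        for _, s := range strings.Split(string(file), ";") {
            if strings.TrimSpace(s) == "" {
                continue
            }
            if _, err := pool.Exec(ctx, s); err != nil {
                return err
            }
        }
        if _, err := pool.Exec(ctx, `INSERT INTO base (entry) VALUES ($1)`, name); err != nil {
            return err
        }
    }
    return nil
}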