2023-07-14 23:05:49 +00:00
|
|
|
package harmonydb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"embed"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
2023-08-23 23:57:34 +00:00
|
|
|
"net"
|
2023-07-14 23:05:49 +00:00
|
|
|
"regexp"
|
|
|
|
"sort"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
logging "github.com/ipfs/go-log/v2"
|
|
|
|
"github.com/jackc/pgx/v5"
|
|
|
|
"github.com/jackc/pgx/v5/pgconn"
|
|
|
|
"github.com/jackc/pgx/v5/pgxpool"
|
|
|
|
|
|
|
|
"github.com/filecoin-project/lotus/node/config"
|
|
|
|
)
|
|
|
|
|
|
|
|
type ITestID string
|
|
|
|
|
|
|
|
// ItestNewID see ITestWithID doc
|
|
|
|
func ITestNewID() ITestID {
|
|
|
|
return ITestID(strconv.Itoa(rand.Intn(99999)))
|
|
|
|
}
|
|
|
|
|
|
|
|
// DB wraps a pgx connection pool together with the configuration needed
// to (re)establish it. Construct via New / NewFromConfig.
type DB struct {
	pgx       *pgxpool.Pool   // live pool; nil until addStatsAndConnect runs
	cfg       *pgxpool.Config // parsed config, incl. fallback hosts and tracer
	schema    string          // schema searched first: "lotus", or "itest_<id>" in tests
	hostnames []string        // hosts in priority order; index feeds the WhichHost metric
}
|
|
|
|
|
|
|
|
// logger is the package-level structured logger for harmonydb.
var logger = logging.Logger("harmonydb")
|
|
|
|
|
|
|
|
// NewFromConfig is a convenience function.
|
|
|
|
// In usage:
|
|
|
|
//
|
|
|
|
// db, err := NewFromConfig(config.HarmonyDB) // in binary init
|
|
|
|
func NewFromConfig(cfg config.HarmonyDB) (*DB, error) {
|
|
|
|
return New(
|
|
|
|
cfg.Hosts,
|
|
|
|
cfg.Username,
|
|
|
|
cfg.Password,
|
|
|
|
cfg.Database,
|
|
|
|
cfg.Port,
|
|
|
|
"",
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
func NewFromConfigWithITestID(cfg config.HarmonyDB) func(id ITestID) (*DB, error) {
|
|
|
|
return func(id ITestID) (*DB, error) {
|
|
|
|
return New(
|
|
|
|
cfg.Hosts,
|
|
|
|
cfg.Username,
|
|
|
|
cfg.Password,
|
|
|
|
cfg.Database,
|
|
|
|
cfg.Port,
|
|
|
|
id,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// New is to be called once per binary to establish the pool.
|
|
|
|
// log() is for errors. It returns an upgraded database's connection.
|
|
|
|
// This entry point serves both production and integration tests, so it's more DI.
|
2023-09-27 03:06:00 +00:00
|
|
|
func New(hosts []string, username, password, database, port string, itestID ITestID) (*DB, error) {
|
2023-07-14 23:05:49 +00:00
|
|
|
itest := string(itestID)
|
|
|
|
connString := ""
|
|
|
|
if len(hosts) > 0 {
|
|
|
|
connString = "host=" + hosts[0] + " "
|
|
|
|
}
|
|
|
|
for k, v := range map[string]string{"user": username, "password": password, "dbname": database, "port": port} {
|
|
|
|
if strings.TrimSpace(v) != "" {
|
|
|
|
connString += k + "=" + v + " "
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
schema := "lotus"
|
|
|
|
if itest != "" {
|
|
|
|
schema = "itest_" + itest
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := ensureSchemaExists(connString, schema); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
cfg, err := pgxpool.ParseConfig(connString + "search_path=" + schema)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// enable multiple fallback hosts.
|
|
|
|
for _, h := range hosts[1:] {
|
|
|
|
cfg.ConnConfig.Fallbacks = append(cfg.ConnConfig.Fallbacks, &pgconn.FallbackConfig{Host: h})
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg.ConnConfig.OnNotice = func(conn *pgconn.PgConn, n *pgconn.Notice) {
|
2023-09-27 03:06:00 +00:00
|
|
|
logger.Debug("database notice: " + n.Message + ": " + n.Detail)
|
2023-07-14 23:05:49 +00:00
|
|
|
DBMeasures.Errors.M(1)
|
|
|
|
}
|
|
|
|
|
2023-09-27 03:06:00 +00:00
|
|
|
db := DB{cfg: cfg, schema: schema, hostnames: hosts} // pgx populated in AddStatsAndConnect
|
2023-07-14 23:05:49 +00:00
|
|
|
if err := db.addStatsAndConnect(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &db, db.upgrade()
|
|
|
|
}
|
|
|
|
|
|
|
|
// tracer implements pgx's query-tracing hooks, feeding latency and error
// counts into DBMeasures and the debug log.
type tracer struct{}
|
|
|
|
|
|
|
|
// ctxkey is a private context-key type, preventing collisions with other
// packages' context values.
type ctxkey string

// Context keys used by tracer to carry the query start time and SQL text
// from TraceQueryStart to TraceQueryEnd.
// NOTE(review): ALL_CAPS is un-idiomatic Go naming (prefer MixedCaps), but
// these are exported, so renaming could break callers outside this file.
const SQL_START = ctxkey("sqlStart")

const SQL_STRING = ctxkey("sqlString")
|
2023-07-14 23:05:49 +00:00
|
|
|
|
|
|
|
func (t tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
|
2023-08-14 16:40:12 +00:00
|
|
|
return context.WithValue(context.WithValue(ctx, SQL_START, time.Now()), SQL_STRING, data.SQL)
|
2023-07-14 23:05:49 +00:00
|
|
|
}
|
|
|
|
func (t tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
|
|
|
|
DBMeasures.Hits.M(1)
|
2023-08-14 16:40:12 +00:00
|
|
|
ms := time.Since(ctx.Value(SQL_START).(time.Time)).Milliseconds()
|
2023-07-14 23:05:49 +00:00
|
|
|
DBMeasures.TotalWait.M(ms)
|
|
|
|
DBMeasures.Waits.Observe(float64(ms))
|
|
|
|
if data.Err != nil {
|
|
|
|
DBMeasures.Errors.M(1)
|
|
|
|
}
|
2023-08-14 16:40:12 +00:00
|
|
|
logger.Debugw("SQL run",
|
|
|
|
"query", ctx.Value(SQL_STRING).(string),
|
|
|
|
"err", data.Err,
|
|
|
|
"rowCt", data.CommandTag.RowsAffected(),
|
|
|
|
"milliseconds", ms)
|
2023-07-14 23:05:49 +00:00
|
|
|
}
|
|
|
|
|
2023-08-23 23:57:34 +00:00
|
|
|
func (db *DB) GetRoutableIP() (string, error) {
|
|
|
|
tx, err := db.pgx.Begin(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
2023-08-26 03:25:19 +00:00
|
|
|
defer func() { _ = tx.Rollback(context.Background()) }()
|
2023-08-23 23:57:34 +00:00
|
|
|
local := tx.Conn().PgConn().Conn().LocalAddr()
|
|
|
|
addr, ok := local.(*net.TCPAddr)
|
|
|
|
if !ok {
|
2023-08-25 23:17:31 +00:00
|
|
|
return "", fmt.Errorf("could not get local addr from %v", addr)
|
2023-08-23 23:57:34 +00:00
|
|
|
}
|
|
|
|
return addr.IP.String(), nil
|
|
|
|
}
|
|
|
|
|
2023-07-14 23:05:49 +00:00
|
|
|
// addStatsAndConnect connects a prometheus logger. Be sure to run this before using the DB.
|
|
|
|
func (db *DB) addStatsAndConnect() error {
|
|
|
|
|
|
|
|
db.cfg.ConnConfig.Tracer = tracer{}
|
|
|
|
|
|
|
|
hostnameToIndex := map[string]float64{}
|
|
|
|
for i, h := range db.hostnames {
|
|
|
|
hostnameToIndex[h] = float64(i)
|
|
|
|
}
|
|
|
|
db.cfg.AfterConnect = func(ctx context.Context, c *pgx.Conn) error {
|
|
|
|
s := db.pgx.Stat()
|
|
|
|
DBMeasures.OpenConnections.M(int64(s.TotalConns()))
|
|
|
|
DBMeasures.WhichHost.Observe(hostnameToIndex[c.Config().Host])
|
|
|
|
|
|
|
|
//FUTURE place for any connection seasoning
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-09-29 16:56:10 +00:00
|
|
|
// Timeout the first connection so we know if the DB is down.
|
|
|
|
ctx, ctxClose := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
|
|
|
|
defer ctxClose()
|
2023-07-14 23:05:49 +00:00
|
|
|
var err error
|
2023-09-29 16:56:10 +00:00
|
|
|
db.pgx, err = pgxpool.NewWithConfig(ctx, db.cfg)
|
2023-07-14 23:05:49 +00:00
|
|
|
if err != nil {
|
2023-09-27 03:06:00 +00:00
|
|
|
logger.Error(fmt.Sprintf("Unable to connect to database: %v\n", err))
|
2023-07-14 23:05:49 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ITestDeleteAll will delete everything created for "this" integration test.
|
|
|
|
// This must be called at the end of each integration test.
|
|
|
|
func (db *DB) ITestDeleteAll() {
|
|
|
|
if !strings.HasPrefix(db.schema, "itest_") {
|
|
|
|
fmt.Println("Warning: this should never be called on anything but an itest schema.")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defer db.pgx.Close()
|
|
|
|
_, err := db.pgx.Exec(context.Background(), "DROP SCHEMA "+db.schema+" CASCADE")
|
|
|
|
if err != nil {
|
|
|
|
fmt.Println("warning: unclean itest shutdown: cannot delete schema: " + err.Error())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// schemaREString restricts schema names to alphanumerics and underscore.
// This doubles as SQL-injection protection, since the schema name is
// concatenated directly into CREATE SCHEMA / DROP SCHEMA statements.
var schemaREString = "^[A-Za-z0-9_]+$"

var schemaRE = regexp.MustCompile(schemaREString)
|
|
|
|
|
|
|
|
func ensureSchemaExists(connString, schema string) error {
|
|
|
|
// FUTURE allow using fallback DBs for start-up.
|
2023-10-16 15:28:58 +00:00
|
|
|
ctx, cncl := context.WithDeadline(context.Background(), time.Now().Add(3*time.Second))
|
|
|
|
p, err := pgx.Connect(ctx, connString)
|
|
|
|
defer cncl()
|
2023-07-14 23:05:49 +00:00
|
|
|
if err != nil {
|
2023-10-16 15:28:58 +00:00
|
|
|
return fmt.Errorf("unable to connect to db: %s, err: %v", connString, err)
|
2023-07-14 23:05:49 +00:00
|
|
|
}
|
|
|
|
defer func() { _ = p.Close(context.Background()) }()
|
|
|
|
|
|
|
|
if len(schema) < 5 || !schemaRE.MatchString(schema) {
|
|
|
|
return errors.New("schema must be of the form " + schemaREString + "\n Got: " + schema)
|
|
|
|
}
|
|
|
|
_, err = p.Exec(context.Background(), "CREATE SCHEMA IF NOT EXISTS "+schema)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("cannot create schema: %w", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// fs embeds the sql/ migration scripts; upgrade() applies them in
// lexicographic filename order.
// NOTE(review): this name shadows the std "io/fs" package within this file.
//go:embed sql
var fs embed.FS
|
|
|
|
|
|
|
|
// upgrade applies all pending sql/ migration scripts, in lexicographic
// filename order, recording each applied script in the "base" table so it
// only ever runs once per schema.
func (db *DB) upgrade() error {
	// Does the version table exist? if not, make it.
	// NOTE: This cannot change except via the next sql file.
	_, err := db.Exec(context.Background(), `CREATE TABLE IF NOT EXISTS base (
		id SERIAL PRIMARY KEY,
		entry CHAR(12),
		applied TIMESTAMP DEFAULT current_timestamp
	)`)
	if err != nil {
		logger.Error("Upgrade failed.")
		return err
	}

	// __Run scripts in order.__

	// landed records which scripts (by file name, stored as CHAR(12)) have
	// already been applied to this schema.
	landed := map[string]bool{}
	{
		var landedEntries []struct{ Entry string }
		err = db.Select(context.Background(), &landedEntries, "SELECT entry FROM base")
		if err != nil {
			logger.Error("Cannot read entries: " + err.Error())
			return err
		}
		for _, l := range landedEntries {
			landed[l.Entry] = true
		}
	}
	dir, err := fs.ReadDir("sql")
	if err != nil {
		logger.Error("Cannot read fs entries: " + err.Error())
		return err
	}
	// Lexicographic filename order defines the migration sequence.
	sort.Slice(dir, func(i, j int) bool { return dir[i].Name() < dir[j].Name() })

	// An empty sql/ directory almost certainly means a broken embed.
	if len(dir) == 0 {
		logger.Error("No sql files found.")
	}
	for _, e := range dir {
		name := e.Name()
		// Skip scripts already applied and non-.sql files.
		// NOTE(review): the debug message below also fires for non-.sql
		// files, where "already applied" is misleading.
		if landed[name] || !strings.HasSuffix(name, ".sql") {
			logger.Debug("DB Schema " + name + " already applied.")
			continue
		}
		file, err := fs.ReadFile("sql/" + name)
		if err != nil {
			logger.Error("weird embed file read err")
			return err
		}
		// Naive statement split: this breaks if a script embeds ';' inside
		// a string literal or function body — keep migration files simple.
		for _, s := range strings.Split(string(file), ";") { // Implement the changes.
			if len(strings.TrimSpace(s)) == 0 {
				continue
			}
			_, err = db.pgx.Exec(context.Background(), s)
			if err != nil {
				msg := fmt.Sprintf("Could not upgrade! File %s, Query: %s, Returned: %s", name, s, err.Error())
				logger.Error(msg)
				return errors.New(msg) // makes devs lives easier by placing message at the end.
			}
		}

		// Mark Completed.
		_, err = db.Exec(context.Background(), "INSERT INTO base (entry) VALUES ($1)", name)
		if err != nil {
			logger.Error("Cannot update base: " + err.Error())
			return fmt.Errorf("cannot insert into base: %w", err)
		}
	}
	return nil
}
|