// lazy_tx.go — DelayedTx, a write-caching transaction wrapper that defers
// Exec statements and COPY operations in memory until Commit.
package sql

import (
	"context"
	"reflect"
	"sync"
	"time"

	"github.com/cerc-io/plugeth-statediff/indexer/database/metrics"
	"github.com/cerc-io/plugeth-statediff/utils/log"
)
// copyFromCheckLimit bounds how far back in the cache CopyFrom searches for a
// previous COPY into the same table/columns to merge new rows into.
// Changing this to 1 would make sure only sequential COPYs were combined.
const copyFromCheckLimit = 100
type DelayedTx struct {
cache []interface{}
db Database
2023-10-03 10:34:13 +00:00
sync.RWMutex
2023-06-14 12:43:34 +00:00
}
// cachedStmt is a deferred Exec call: the SQL text and its arguments,
// replayed against the real transaction at Commit time.
type cachedStmt struct {
	sql string
	args []interface{}
}
// copyFrom is a deferred COPY operation: the target table, its columns, and
// the accumulated rows. Later CopyFrom calls targeting the same table and
// columns may append additional rows to an existing entry.
type copyFrom struct {
	tableName []string
	columnNames []string
	rows [][]interface{}
}
// result is a trivial Result implementation carrying a fixed row count.
// DelayedTx.Exec returns result(0) because the real count is unknown until
// the cached statement is replayed at Commit.
type result int64
// appendRows merges the given rows into this pending COPY operation.
func (cf *copyFrom) appendRows(rows [][]interface{}) {
	combined := append(cf.rows, rows...)
	cf.rows = combined
}
// matches reports whether this pending COPY targets the same table and the
// same column set, and can therefore absorb additional rows.
func (cf *copyFrom) matches(tableName []string, columnNames []string) bool {
	if !reflect.DeepEqual(cf.tableName, tableName) {
		return false
	}
	return reflect.DeepEqual(cf.columnNames, columnNames)
}
// NewDelayedTx wraps db in a DelayedTx with an empty statement cache.
func NewDelayedTx(db Database) *DelayedTx {
	tx := &DelayedTx{db: db}
	return tx
}
// QueryRow passes the query straight through to the underlying database,
// bypassing the write cache entirely — reads therefore do not observe
// statements cached on this DelayedTx that have not yet been committed.
func (tx *DelayedTx) QueryRow(ctx context.Context, sql string, args ...interface{}) ScannableRow {
	return tx.db.QueryRow(ctx, sql, args...)
}
func (tx *DelayedTx) findPrevCopyFrom(tableName []string, columnNames []string, limit int) (*copyFrom, int) {
2023-10-03 10:34:13 +00:00
tx.RLock()
defer tx.RUnlock()
2023-06-14 12:43:34 +00:00
for pos, count := len(tx.cache)-1, 0; pos >= 0 && count < limit; pos, count = pos-1, count+1 {
prevCopy, ok := tx.cache[pos].(*copyFrom)
if ok && prevCopy.matches(tableName, columnNames) {
return prevCopy, count
}
}
return nil, -1
}
func (tx *DelayedTx) CopyFrom(ctx context.Context, tableName []string, columnNames []string, rows [][]interface{}) (int64, error) {
if prevCopy, distance := tx.findPrevCopyFrom(tableName, columnNames, copyFromCheckLimit); nil != prevCopy {
log.Trace("statediff lazy_tx : Appending to COPY", "table", tableName,
"current", len(prevCopy.rows), "new", len(rows), "distance", distance)
prevCopy.appendRows(rows)
} else {
2023-10-03 10:34:13 +00:00
tx.Lock()
2023-06-14 12:43:34 +00:00
tx.cache = append(tx.cache, &copyFrom{tableName, columnNames, rows})
2023-10-03 10:34:13 +00:00
tx.Unlock()
2023-06-14 12:43:34 +00:00
}
return 0, nil
}
func (tx *DelayedTx) Exec(ctx context.Context, sql string, args ...interface{}) (Result, error) {
2023-10-03 10:34:13 +00:00
tx.Lock()
2023-06-14 12:43:34 +00:00
tx.cache = append(tx.cache, cachedStmt{sql, args})
2023-10-03 10:34:13 +00:00
defer tx.Unlock()
return result(0), nil
2023-06-14 12:43:34 +00:00
}
func (tx *DelayedTx) Commit(ctx context.Context) error {
t := time.Now()
2023-06-14 12:43:34 +00:00
base, err := tx.db.Begin(ctx)
if err != nil {
return err
}
metrics.IndexerMetrics.FreePostgresTimer.Update(time.Since(t))
2023-06-14 12:43:34 +00:00
defer func() {
if p := recover(); p != nil {
rollback(ctx, base)
panic(p)
} else if err != nil {
rollback(ctx, base)
}
}()
2023-10-03 10:34:13 +00:00
tx.Lock()
defer tx.Unlock()
2023-06-14 12:43:34 +00:00
for _, item := range tx.cache {
switch item := item.(type) {
case *copyFrom:
2023-06-21 13:32:07 +00:00
_, err = base.CopyFrom(ctx, item.tableName, item.columnNames, item.rows)
2023-06-14 12:43:34 +00:00
if err != nil {
log.Error("COPY error", "table", item.tableName, "error", err)
2023-06-14 12:43:34 +00:00
return err
}
case cachedStmt:
2023-06-21 13:32:07 +00:00
_, err = base.Exec(ctx, item.sql, item.args...)
2023-06-14 12:43:34 +00:00
if err != nil {
return err
}
}
}
tx.cache = nil
return base.Commit(ctx)
}
func (tx *DelayedTx) Rollback(ctx context.Context) error {
2023-10-03 10:34:13 +00:00
tx.Lock()
defer tx.Unlock()
2023-06-14 12:43:34 +00:00
tx.cache = nil
return nil
}
2023-10-03 10:34:13 +00:00
// RowsAffected satisfies sql.Result
func (r result) RowsAffected() (int64, error) {
return int64(r), nil
}