go-ethereum/statediff/indexer/database/sql/batch_tx.go

// VulcanizeDB
// Copyright © 2021 Vulcanize

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package sql

import (
	"context"
	"sync"
	"sync/atomic"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
	dshelp "github.com/ipfs/go-ipfs-ds-help"
	node "github.com/ipfs/go-ipld-format"
	"github.com/lib/pq"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
	"github.com/ethereum/go-ethereum/statediff/indexer/models"
)
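
// startingCacheCapacity is the initial capacity reserved for the ipldCache slices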
const startingCacheCapacity = 1024 * 24

// BatchTx wraps a sql tx with the state necessary for building the tx concurrently during trie difference iteration
type BatchTx struct {
	BlockNumber      string
	ctx              context.Context
	dbtx             Tx
	stm              string
	quit             chan struct{}
	iplds            chan models.IPLDModel
	ipldCache        models.IPLDBatch
	removedCacheFlag *uint32
	// Tracks expected cache size and ensures cache is caught up before flush
	cacheWg sync.WaitGroup

	submit func(blockTx *BatchTx, err error) error
}

// Submit satisfies indexer.AtomicTx
func (tx *BatchTx) Submit(err error) error {
	return tx.submit(tx, err)
}
func (tx *BatchTx) flush() error {
	tx.cacheWg.Wait()
	_, err := tx.dbtx.Exec(tx.ctx, tx.stm, pq.Array(tx.ipldCache.BlockNumbers), pq.Array(tx.ipldCache.Keys),
		pq.Array(tx.ipldCache.Values))
	if err != nil {
		log.Debug(insertError{"public.blocks", err, tx.stm,
			struct {
				blockNumbers []string
				keys         []string
				values       [][]byte
			}{
				tx.ipldCache.BlockNumbers,
				tx.ipldCache.Keys,
				tx.ipldCache.Values,
			}}.Error())
		return insertError{"public.blocks", err, tx.stm, "too many arguments; use debug mode for full list"}
	}
	tx.ipldCache = models.IPLDBatch{}
	return nil
}

// run in background goroutine to synchronize concurrent appends to the ipldCache
func (tx *BatchTx) cache() {
	for {
		select {
		case i := <-tx.iplds:
			tx.ipldCache.BlockNumbers = append(tx.ipldCache.BlockNumbers, i.BlockNumber)
			tx.ipldCache.Keys = append(tx.ipldCache.Keys, i.Key)
			tx.ipldCache.Values = append(tx.ipldCache.Values, i.Data)
			tx.cacheWg.Done()
		case <-tx.quit:
			tx.ipldCache = models.IPLDBatch{}
			return
		}
	}
}
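
// cacheDirect queues the provided key/value pair for caching at the tx's block number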
func (tx *BatchTx) cacheDirect(key string, value []byte) {
	tx.cacheWg.Add(1)
	tx.iplds <- models.IPLDModel{
		BlockNumber: tx.BlockNumber,
		Key:         key,
		Data:        value,
	}
}
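
// cacheIPLD queues an IPLD node for caching, deriving its key from the node's CID multihash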
func (tx *BatchTx) cacheIPLD(i node.Node) {
	tx.cacheWg.Add(1)
	tx.iplds <- models.IPLDModel{
		BlockNumber: tx.BlockNumber,
		Key:         blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(i.Cid().Hash()).String(),
		Data:        i.RawData(),
	}
}
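
// cacheRaw derives a CID for the raw data from the given codec and multihash type,
// queues the data for caching, and returns the CID string and the blockstore-prefixed key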
func (tx *BatchTx) cacheRaw(codec, mh uint64, raw []byte) (string, string, error) {
	c, err := ipld.RawdataToCid(codec, raw, mh)
	if err != nil {
		return "", "", err
	}
	prefixedKey := blockstore.BlockPrefix.String() + dshelp.MultihashToDsKey(c.Hash()).String()
	tx.cacheWg.Add(1)
	tx.iplds <- models.IPLDModel{
		BlockNumber: tx.BlockNumber,
		Key:         prefixedKey,
		Data:        raw,
	}
	return c.String(), prefixedKey, err
}
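
// cacheRemoved queues the "removed" placeholder entry for caching at most once per batch,
// using removedCacheFlag to deduplicate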
func (tx *BatchTx) cacheRemoved(key string, value []byte) {
	if atomic.LoadUint32(tx.removedCacheFlag) == 0 {
		atomic.StoreUint32(tx.removedCacheFlag, 1)
		tx.cacheWg.Add(1)
		tx.iplds <- models.IPLDModel{
			BlockNumber: tx.BlockNumber,
			Key:         key,
			Data:        value,
		}
	}
}

// rollback sql transaction and log any error
func rollback(ctx context.Context, tx Tx) {
	if err := tx.Rollback(ctx); err != nil {
		log.Error(err.Error())
	}
}