logging tweaks

Roy Crihfield 2022-03-09 21:37:33 +08:00
parent 7a2ef4f542
commit 02488e2b79
3 changed files with 24 additions and 21 deletions

View File

@@ -281,13 +281,15 @@ func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx,
 func (p *publisher) logNodeCounters() {
 	t := time.NewTicker(logInterval)
 	for range t.C {
-		p.printNodeCounters()
+		p.printNodeCounters("progress")
 	}
 }
 
-func (p *publisher) printNodeCounters() {
-	logrus.Infof("runtime: %s", time.Now().Sub(p.startTime).String())
-	logrus.Infof("processed state nodes: %d", atomic.LoadUint64(&p.stateNodeCounter))
-	logrus.Infof("processed storage nodes: %d", atomic.LoadUint64(&p.storageNodeCounter))
-	logrus.Infof("processed code nodes: %d", atomic.LoadUint64(&p.codeNodeCounter))
+func (p *publisher) printNodeCounters(msg string) {
+	log.WithFields(log.Fields{
+		"runtime":       time.Now().Sub(p.startTime).String(),
+		"state nodes":   atomic.LoadUint64(&p.stateNodeCounter),
+		"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
+		"code nodes":    atomic.LoadUint64(&p.codeNodeCounter),
+	}).Info(msg)
 }
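For context, the change above collapses four separate Infof calls into a single structured entry via logrus's WithFields API. A minimal standalone sketch of that pattern, with illustrative field names and counter values rather than the project's actual ones:

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	start := time.Now()

	// One structured record instead of several Infof lines; each counter
	// becomes a key/value field on the same entry.
	log.WithFields(log.Fields{
		"runtime":       time.Since(start).String(),
		"state nodes":   uint64(0), // illustrative values
		"storage nodes": uint64(0),
		"code nodes":    uint64(0),
	}).Info("progress")
}

Passing the message as an argument, as the new printNodeCounters(msg) does, lets the same routine label the periodic "progress" entries and the "final stats" entry differently.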

View File

@@ -27,7 +27,7 @@ import (
 	blockstore "github.com/ipfs/go-ipfs-blockstore"
 	dshelp "github.com/ipfs/go-ipfs-ds-help"
 	"github.com/multiformats/go-multihash"
-	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
 	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
@@ -81,8 +81,7 @@ func (p *publisher) BeginTx() (snapt.Tx, error) {
 	}
 	go p.logNodeCounters()
 	return pubTx{tx, func() {
-		logrus.Info("----- final counts -----")
-		p.printNodeCounters()
+		p.printNodeCounters("final stats")
 	}}, nil
 }
 
@@ -227,13 +226,15 @@ func (p *publisher) PrepareTxForBatch(tx snapt.Tx, maxBatchSize uint) (snapt.Tx,
 func (p *publisher) logNodeCounters() {
 	t := time.NewTicker(logInterval)
 	for range t.C {
-		p.printNodeCounters()
+		p.printNodeCounters("progress")
 	}
 }
 
-func (p *publisher) printNodeCounters() {
-	logrus.Infof("runtime: %s", time.Now().Sub(p.startTime).String())
-	logrus.Infof("processed state nodes: %d", atomic.LoadUint64(&p.stateNodeCounter))
-	logrus.Infof("processed storage nodes: %d", atomic.LoadUint64(&p.storageNodeCounter))
-	logrus.Infof("processed code nodes: %d", atomic.LoadUint64(&p.codeNodeCounter))
+func (p *publisher) printNodeCounters(msg string) {
+	log.WithFields(log.Fields{
+		"runtime":       time.Now().Sub(p.startTime).String(),
+		"state nodes":   atomic.LoadUint64(&p.stateNodeCounter),
+		"storage nodes": atomic.LoadUint64(&p.storageNodeCounter),
+		"code nodes":    atomic.LoadUint64(&p.codeNodeCounter),
+	}).Info(msg)
 }

View File

@@ -29,7 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
-	"github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	. "github.com/vulcanize/eth-pg-ipfs-state-snapshot/pkg/types"
 	iter "github.com/vulcanize/go-eth-state-node-iterator"
@@ -79,14 +79,14 @@ type SnapshotParams struct {
 func (s *Service) CreateSnapshot(params SnapshotParams) error {
 	// extract header from lvldb and publish to PG-IPFS
 	// hold onto the headerID so that we can link the state nodes to this header
-	logrus.Infof("Creating snapshot at height %d", params.Height)
+	log.Infof("Creating snapshot at height %d", params.Height)
 	hash := rawdb.ReadCanonicalHash(s.ethDB, params.Height)
 	header := rawdb.ReadHeader(s.ethDB, hash, params.Height)
 	if header == nil {
 		return fmt.Errorf("unable to read canonical header at height %d", params.Height)
 	}
 
-	logrus.Infof("head hash: %s head height: %d", hash.Hex(), params.Height)
+	log.Infof("head hash: %s head height: %d", hash.Hex(), params.Height)
 
 	err := s.ipfsPublisher.PublishHeader(header)
 	if err != nil {
@@ -128,7 +128,7 @@ func (s *Service) CreateSnapshot(params SnapshotParams) error {
 	defer func() {
 		err := s.tracker.haltAndDump(s.recoveryFile)
 		if err != nil {
-			logrus.Error("failed to write recovery file: ", err)
+			log.Error("failed to write recovery file: ", err)
 		}
 	}()
 
@@ -142,7 +142,7 @@ func (s *Service) CreateSnapshot(params SnapshotParams) error {
 
 // Create snapshot up to head (ignores height param)
 func (s *Service) CreateLatestSnapshot(workers uint) error {
-	logrus.Info("Creating snapshot at head")
+	log.Info("Creating snapshot at head")
 	hash := rawdb.ReadHeadHeaderHash(s.ethDB)
 	height := rawdb.ReadHeaderNumber(s.ethDB, hash)
 	if height == nil {
@@ -234,7 +234,7 @@ func (s *Service) createSnapshot(it trie.NodeIterator, headerID string) error {
 			codeHash := common.BytesToHash(account.CodeHash)
 			codeBytes := rawdb.ReadCode(s.ethDB, codeHash)
 			if len(codeBytes) == 0 {
-				logrus.Error("Code is missing", "account", common.BytesToHash(it.LeafKey()))
+				log.Error("Code is missing", "account", common.BytesToHash(it.LeafKey()))
 				return errors.New("missing code")
 			}
 