ipld-eth-state-snapshot/pkg/snapshot/publisher.go

// Copyright © 2020 Vulcanize, Inc
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package snapshot

import (
    "bytes"
    "fmt"
    "sync/atomic"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/jmoiron/sqlx"
    "github.com/multiformats/go-multihash"
    "github.com/sirupsen/logrus"

    "github.com/ethereum/go-ethereum/statediff/indexer/ipfs/ipld"
    "github.com/ethereum/go-ethereum/statediff/indexer/postgres"
    "github.com/ethereum/go-ethereum/statediff/indexer/shared"
)

const logInterval = 1 * time.Minute

// Publisher is a wrapper around the DB used to publish snapshot data.
type Publisher struct {
    db            *postgres.DB
    currBatchSize uint

    // progress tracking for the periodic logger; the node counters are updated atomically
    stateNodeCounter   uint64
    storageNodeCounter uint64
    codeNodeCounter    uint64
    startTime          time.Time
}

// NewPublisher creates a new Publisher.
func NewPublisher(db *postgres.DB) *Publisher {
    return &Publisher{
        db:            db,
        currBatchSize: 0,
        startTime:     time.Now(),
    }
}

// PublishHeader writes the header to the IPFS-backing Postgres datastore and adds secondary indexes
// in the eth.header_cids table. Unlike the node-level publish methods below, it manages its own
// transaction: the deferred handler rolls back on error or panic and commits otherwise.
func (p *Publisher) PublishHeader(header *types.Header) (headerID int64, err error) {
    headerNode, err := ipld.NewEthHeader(header)
    if err != nil {
        return 0, err
    }

    tx, err := p.db.Beginx()
    if err != nil {
        return 0, err
    }
    // roll back on error or panic; a failed commit is surfaced via the named return
    defer func() {
        if p := recover(); p != nil {
            shared.Rollback(tx)
            panic(p)
        } else if err != nil {
            shared.Rollback(tx)
        } else {
            err = tx.Commit()
        }
    }()

    if err = shared.PublishIPLD(tx, headerNode); err != nil {
        return 0, err
    }

    // the CID string was just generated from headerNode, so key derivation is not expected to fail here
    mhKey, _ := shared.MultihashKeyFromCIDString(headerNode.Cid().String())
    err = tx.QueryRowx(`INSERT INTO eth.header_cids (block_number, block_hash, parent_hash, cid, td, node_id, reward, state_root, tx_root, receipt_root, uncle_root, bloom, timestamp, mh_key, times_validated)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
        ON CONFLICT (block_number, block_hash) DO UPDATE SET block_number = header_cids.block_number
        RETURNING id`,
        header.Number.Uint64(), header.Hash().Hex(), header.ParentHash.Hex(), headerNode.Cid().String(), "0", p.db.NodeID, "0", header.Root.Hex(), header.TxHash.Hex(),
        header.ReceiptHash.Hex(), header.UncleHash.Hex(), header.Bloom.Bytes(), header.Time, mhKey, 0).Scan(&headerID)
    return headerID, err
}

// PublishStateNode writes the state node to the IPFS-backing Postgres datastore and adds secondary
// indexes in the eth.state_cids table. It runs inside the caller-provided transaction.
func (p *Publisher) PublishStateNode(node *node, headerID int64, tx *sqlx.Tx) (int64, error) {
    var stateID int64
    var stateKey string
    if !bytes.Equal(node.key.Bytes(), nullHash.Bytes()) {
        stateKey = node.key.Hex()
    }

    stateCIDStr, mhKey, err := shared.PublishRaw(tx, ipld.MEthStateTrie, multihash.KECCAK_256, node.value)
    if err != nil {
        return 0, err
    }

    err = tx.QueryRowx(`INSERT INTO eth.state_cids (header_id, state_leaf_key, cid, state_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
        ON CONFLICT (header_id, state_path) DO UPDATE SET (state_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)
        RETURNING id`,
        headerID, stateKey, stateCIDStr, node.path, node.nodeType, false, mhKey).Scan(&stateID)

    // increment the state node counter
    atomic.AddUint64(&p.stateNodeCounter, 1)
    // increment the current batch size by two (the IPLD block write plus the index row)
    p.currBatchSize += 2
    return stateID, err
}

// PublishStorageNode writes the storage node to the IPFS-backing Postgres datastore and adds secondary
// indexes in the eth.storage_cids table. It runs inside the caller-provided transaction.
func (p *Publisher) PublishStorageNode(node *node, stateID int64, tx *sqlx.Tx) error {
    var storageKey string
    if !bytes.Equal(node.key.Bytes(), nullHash.Bytes()) {
        storageKey = node.key.Hex()
    }

    storageCIDStr, mhKey, err := shared.PublishRaw(tx, ipld.MEthStorageTrie, multihash.KECCAK_256, node.value)
    if err != nil {
        return err
    }

    _, err = tx.Exec(`INSERT INTO eth.storage_cids (state_id, storage_leaf_key, cid, storage_path, node_type, diff, mh_key) VALUES ($1, $2, $3, $4, $5, $6, $7)
        ON CONFLICT (state_id, storage_path) DO UPDATE SET (storage_leaf_key, cid, node_type, diff, mh_key) = ($2, $3, $5, $6, $7)`,
        stateID, storageKey, storageCIDStr, node.path, node.nodeType, false, mhKey)
    if err != nil {
        return err
    }

    // increment the storage node counter
    atomic.AddUint64(&p.storageNodeCounter, 1)
    // increment the current batch size by two (the IPLD block write plus the index row)
    p.currBatchSize += 2
    return nil
}

// PublishCode writes contract bytecode to the IPFS-backing Postgres datastore.
// It runs inside the caller-provided transaction.
func (p *Publisher) PublishCode(codeHash common.Hash, codeBytes []byte, tx *sqlx.Tx) error {
    // no codec for code, doesn't matter though since blockstore key is multihash-derived
    mhKey, err := shared.MultihashKeyFromKeccak256(codeHash)
    if err != nil {
        return fmt.Errorf("error deriving multihash key from codehash: %v", err)
    }

    if err = shared.PublishDirect(tx, mhKey, codeBytes); err != nil {
        return fmt.Errorf("error publishing code IPLD: %v", err)
    }

    // increment the code node counter
    atomic.AddUint64(&p.codeNodeCounter, 1)
    // code is a single direct write (no index row), so the batch size only grows by one
    p.currBatchSize++
    return nil
}
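// checkBatchSize commits the current transaction and begins a new one once the
// accumulated batch size reaches maxBatchSize; otherwise it returns the passed
// transaction unchanged. Because the transaction may be swapped out, callers are
// expected to keep using the returned *sqlx.Tx, e.g. (a sketch; the real call
// sites live in the snapshot service, not in this file):
//
//    if tx, err = p.checkBatchSize(tx, maxBatchSize); err != nil {
//        return err
//    }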
func (p *Publisher) checkBatchSize(tx *sqlx.Tx, maxBatchSize uint) (*sqlx.Tx, error) {
    var err error
    // maximum batch size reached, commit the current transaction and begin a new one
    if maxBatchSize <= p.currBatchSize {
        if err = tx.Commit(); err != nil {
            return nil, err
        }
        tx, err = p.db.Beginx()
        if err != nil {
            return nil, err
        }
        p.currBatchSize = 0
    }
    return tx, nil
}

// logNodeCounters periodically logs the number of nodes processed. It blocks on
// the ticker, so it is meant to be run in its own goroutine.
func (p *Publisher) logNodeCounters() {
    t := time.NewTicker(logInterval)
    for range t.C {
        p.printNodeCounters()
    }
}
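// printNodeCounters logs the elapsed runtime and the number of state, storage,
// and code nodes published so far.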
func (p *Publisher) printNodeCounters() {
logrus.Infof("runtime: %s", time.Now().Sub(p.startTime).String())
logrus.Infof("processed state nodes: %d", atomic.LoadUint64(&p.stateNodeCounter))
logrus.Infof("processed storage nodes: %d", atomic.LoadUint64(&p.storageNodeCounter))
logrus.Infof("processed code nodes: %d", atomic.LoadUint64(&p.codeNodeCounter))
}
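
// A minimal sketch of how this type is presumably wired together by the snapshot
// service (the call site is an assumption; it is not part of this file): the
// service constructs the publisher, starts the counter logger in its own
// goroutine, and then drives the Publish* methods as it traverses the state trie.
//
//    pub := NewPublisher(db)
//    go pub.logNodeCounters()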