cmd,internal/era: implement export-history subcommand (#26621)

* all: implement era format, add history importer/export

* internal/era/e2store: refactor e2store to provide ReadAt interface

* internal/era/e2store: export HeaderSize

* internal/era: refactor era to use ReadAt interface

* internal/era: elevate anonymous func to named

* cmd/utils: don't store entire era file in-memory during import / export

* internal/era: better abstraction between era and e2store

* cmd/era: properly close era files

* cmd/era: don't let defers stack

* cmd/geth: add description for import-history

* cmd/utils: better bytes buffer

* internal/era: error if accumulator has more records than max allowed

* internal/era: better doc comment

* internal/era/e2store: rm superfluous reader, rm superfluous testcases, add fuzzer

* internal/era: avoid some repetition

* internal/era: simplify clauses

* internal/era: unexport things

* internal/era,cmd/utils,cmd/era: change to iterator interface for reading era entries

* cmd/utils: better defer handling in history test

* internal/era,cmd: add number method to era iterator to get the current block number

* internal/era/e2store: avoid double allocation during write

* internal/era,cmd/utils: fix lint issues

* internal/era: add ReaderAt func so entry value can be read lazily

Co-authored-by: lightclient <lightclient@protonmail.com>
Co-authored-by: Martin Holst Swende <martin@swende.se>

* internal/era: improve iterator interface

* internal/era: fix rlp decode of header and correctly read total difficulty

* cmd/era: fix rebase errors

* cmd/era: clearer comments

* cmd,internal: fix comment typos

---------

Co-authored-by: Martin Holst Swende <martin@swende.se>
Commit 1f50aa7631 (parent 199e0c9ff5)
Authored by lightclient, 2024-02-07 09:18:27 -07:00, committed by GitHub
15 changed files with 2145 additions and 0 deletions
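
The headline addition is the internal/era package and its iterator-based reader. A minimal sketch of walking an Era1 archive with that API is shown below; the file path is hypothetical, and internal packages only compile from within the go-ethereum module.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/internal/era"
)

func main() {
	// Hypothetical path; era1 files are named <network>-<epoch>-<root>.era1.
	e, err := era.Open("eras/mainnet-00000-00000000.era1")
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	it, err := era.NewIterator(e)
	if err != nil {
		log.Fatal(err)
	}
	for it.Next() {
		block, receipts, err := it.BlockAndReceipts()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("block", block.NumberU64(), "receipts", len(receipts))
	}
	if err := it.Error(); err != nil {
		log.Fatal(err)
	}
}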

cmd/era/main.go (new file)
@@ -0,0 +1,324 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"fmt"
"math/big"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
var app = flags.NewApp("go-ethereum era tool")
var (
dirFlag = &cli.StringFlag{
Name: "dir",
Usage: "directory storing all relevant era1 files",
Value: "eras",
}
networkFlag = &cli.StringFlag{
Name: "network",
Usage: "network name associated with era1 files",
Value: "mainnet",
}
eraSizeFlag = &cli.IntFlag{
Name: "size",
Usage: "number of blocks per era",
Value: era.MaxEra1Size,
}
txsFlag = &cli.BoolFlag{
Name: "txs",
Usage: "print full transaction values",
}
)
var (
blockCommand = &cli.Command{
Name: "block",
Usage: "get block data",
ArgsUsage: "<number>",
Action: block,
Flags: []cli.Flag{
txsFlag,
},
}
infoCommand = &cli.Command{
Name: "info",
ArgsUsage: "<epoch>",
Usage: "get epoch information",
Action: info,
}
verifyCommand = &cli.Command{
Name: "verify",
ArgsUsage: "<expected>",
Usage: "verifies each era1 against expected accumulator root",
Action: verify,
}
)
func init() {
app.Commands = []*cli.Command{
blockCommand,
infoCommand,
verifyCommand,
}
app.Flags = []cli.Flag{
dirFlag,
networkFlag,
eraSizeFlag,
}
}
func main() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
// block prints the specified block from an era1 store.
func block(ctx *cli.Context) error {
num, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid block number: %w", err)
}
e, err := open(ctx, num/uint64(ctx.Int(eraSizeFlag.Name)))
if err != nil {
return fmt.Errorf("error opening era1: %w", err)
}
defer e.Close()
// Read block with number.
block, err := e.GetBlockByNumber(num)
if err != nil {
return fmt.Errorf("error reading block %d: %w", num, err)
}
// Convert block to JSON and print.
val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig)
b, err := json.MarshalIndent(val, "", " ")
if err != nil {
return fmt.Errorf("error marshaling json: %w", err)
}
fmt.Println(string(b))
return nil
}
// info prints some high-level information about the era1 file.
func info(ctx *cli.Context) error {
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid epoch number: %w", err)
}
e, err := open(ctx, epoch)
if err != nil {
return err
}
defer e.Close()
acc, err := e.Accumulator()
if err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
td, err := e.InitialTD()
if err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
info := struct {
Accumulator common.Hash `json:"accumulator"`
TotalDifficulty *big.Int `json:"totalDifficulty"`
StartBlock uint64 `json:"startBlock"`
Count uint64 `json:"count"`
}{
acc, td, e.Start(), e.Count(),
}
b, _ := json.MarshalIndent(info, "", " ")
fmt.Println(string(b))
return nil
}
// open opens an era1 file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
)
entries, err := era.ReadDir(dir, network)
if err != nil {
return nil, fmt.Errorf("error reading era dir: %w", err)
}
if epoch >= uint64(len(entries)) {
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
}
return era.Open(path.Join(dir, entries[epoch]))
}
// verify checks each era1 file in a directory to ensure it is well-formed and
// that the accumulator matches the expected value.
func verify(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
return fmt.Errorf("missing accumulators file")
}
roots, err := readHashes(ctx.Args().First())
if err != nil {
return fmt.Errorf("unable to read expected roots file: %w", err)
}
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
start = time.Now()
reported = time.Now()
)
entries, err := era.ReadDir(dir, network)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
if len(entries) != len(roots) {
return fmt.Errorf("number of era1 files should match the number of accumulator hashes")
}
// Verify each epoch matches the expected root.
for i, want := range roots {
// Wrap in function so defers don't stack.
err := func() error {
name := entries[i]
e, err := era.Open(path.Join(dir, name))
if err != nil {
return fmt.Errorf("error opening era1 file %s: %w", name, err)
}
defer e.Close()
// Read accumulator and check against expected.
if got, err := e.Accumulator(); err != nil {
return fmt.Errorf("error retrieving accumulator for %s: %w", name, err)
} else if got != want {
return fmt.Errorf("invalid root %s: got %s, want %s", name, got, want)
}
// Recompute accumulator.
if err := checkAccumulator(e); err != nil {
return fmt.Errorf("error verify era1 file %s: %w", name, err)
}
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
fmt.Printf("Verifying Era1 files \t\t verified=%d,\t elapsed=%s\n", i, common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
// checkAccumulator verifies the accumulator matches the data in the Era.
func checkAccumulator(e *era.Era) error {
var (
err error
want common.Hash
td *big.Int
tds = make([]*big.Int, 0)
hashes = make([]common.Hash, 0)
)
if want, err = e.Accumulator(); err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
if td, err = e.InitialTD(); err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
it, err := era.NewIterator(e)
if err != nil {
return fmt.Errorf("error making era iterator: %w", err)
}
// To fully verify an era the following attributes must be checked:
// 1) the block index is constructed correctly
// 2) the tx root matches the value in the block
// 3) the receipts root matches the value in the block
// 4) the starting total difficulty value is correct
// 5) the accumulator is correct by recomputing it locally, which verifies
// the blocks are all correct (via hash)
//
// The attributes 1), 2), and 3) are checked for each block. 4) and 5) require
// accumulation across the entire set and are verified at the end.
for it.Next() {
// 1) next() walks the block index, so we're able to implicitly verify it.
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
}
block, receipts, err := it.BlockAndReceipts()
if err != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
// 2) recompute tx root and verify against header.
tr := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil))
if tr != block.TxHash() {
return fmt.Errorf("tx root in block %d mismatch: want %s, got %s", block.NumberU64(), block.TxHash(), tr)
}
// 3) recompute receipt root and check value against block.
rr := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if rr != block.ReceiptHash() {
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
}
hashes = append(hashes, block.Hash())
td.Add(td, block.Difficulty())
tds = append(tds, new(big.Int).Set(td))
}
// 4+5) Verify accumulator and total difficulty.
got, err := era.ComputeAccumulator(hashes, tds)
if err != nil {
return fmt.Errorf("error computing accumulator: %w", err)
}
if got != want {
return fmt.Errorf("expected accumulator root does not match calculated: got %s, want %s", got, want)
}
return nil
}
// readHashes reads a file of newline-delimited hashes.
func readHashes(f string) ([]common.Hash, error) {
b, err := os.ReadFile(f)
if err != nil {
return nil, fmt.Errorf("unable to open accumulators file")
}
s := strings.Split(string(b), "\n")
// Remove empty last element, if present.
if s[len(s)-1] == "" {
s = s[:len(s)-1]
}
// Convert to hashes.
r := make([]common.Hash, len(s))
for i := range s {
r[i] = common.HexToHash(s[i])
}
return r, nil
}
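
The <expected> argument to verify is just a newline-delimited list of accumulator roots, one per epoch in epoch order, as consumed by readHashes above. A small sketch of producing such a file (the two roots are placeholders):

package main

import (
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Placeholder roots; real values come from a trusted accumulator list and
	// must be ordered by epoch, matching the era1 files in the directory.
	roots := []common.Hash{
		common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"),
		common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"),
	}
	lines := make([]string, len(roots))
	for i, r := range roots {
		lines[i] = r.Hex()
	}
	// readHashes drops a trailing empty element, so a final newline is harmless.
	if err := os.WriteFile("roots.txt", []byte(strings.Join(lines, "\n")+"\n"), 0o644); err != nil {
		panic(err)
	}
}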

@@ -35,10 +35,12 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2"
)
@@ -122,6 +124,33 @@ Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
}
importHistoryCommand = &cli.Command{
Action: importHistory,
Name: "import-history",
Usage: "Import an Era archive",
ArgsUsage: "<dir>",
Flags: flags.Merge([]cli.Flag{
utils.TxLookupLimitFlag,
},
utils.DatabaseFlags,
utils.NetworkFlags,
),
Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
`,
}
exportHistoryCommand = &cli.Command{
Action: exportHistory,
Name: "export-history",
Usage: "Export blockchain history to Era archives",
ArgsUsage: "<dir> <first> <last>",
Flags: flags.Merge(utils.DatabaseFlags),
Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
`,
}
importPreimagesCommand = &cli.Command{
Action: importPreimages,
@@ -364,7 +393,97 @@ func exportChain(ctx *cli.Context) error {
}
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
}
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
fmt.Printf("Export done in %v\n", time.Since(start))
return nil
}
func importHistory(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, db := utils.MakeChain(ctx, stack, false)
defer db.Close()
var (
start = time.Now()
dir = ctx.Args().Get(0)
network string
)
// Determine network.
if utils.IsNetworkPreset(ctx) {
switch {
case ctx.Bool(utils.MainnetFlag.Name):
network = "mainnet"
case ctx.Bool(utils.SepoliaFlag.Name):
network = "sepolia"
case ctx.Bool(utils.GoerliFlag.Name):
network = "goerli"
}
} else {
// No network flag set, try to determine network based on files
// present in directory.
var networks []string
for _, n := range params.NetworkNames {
entries, err := era.ReadDir(dir, n)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
if len(entries) > 0 {
networks = append(networks, n)
}
}
if len(networks) == 0 {
return fmt.Errorf("no era1 files found in %s", dir)
}
if len(networks) > 1 {
return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
}
network = networks[0]
}
if err := utils.ImportHistory(chain, db, dir, network); err != nil {
return err
}
fmt.Printf("Import done in %v\n", time.Since(start))
return nil
}
// exportHistory exports chain history in Era archives at a specified
// directory.
func exportHistory(ctx *cli.Context) error {
if ctx.Args().Len() != 3 {
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, _ := utils.MakeChain(ctx, stack, true)
start := time.Now()
var (
dir = ctx.Args().Get(0)
first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
last, lerr = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
)
if ferr != nil || lerr != nil {
utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
}
if first < 0 || last < 0 {
utils.Fatalf("Export error: block number must be greater than 0\n")
}
if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
}
err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
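
The two subcommands above are thin wrappers around utils.ExportHistory and utils.ImportHistory (added in cmd/utils below). A rough sketch of the same round trip done programmatically; the chain, freshChain and freshDB values are assumed to already exist, and freshChain must still be at genesis for the import to be accepted:

package sketch

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/era"
)

// exportThenImport writes Era1 archives (plus checksums.txt) for the whole
// chain into dir, then replays them into a fresh, genesis-only chain.
func exportThenImport(chain, freshChain *core.BlockChain, freshDB ethdb.Database, dir string) error {
	head := chain.CurrentBlock().Number.Uint64()
	if err := utils.ExportHistory(chain, dir, 0, head, uint64(era.MaxEra1Size)); err != nil {
		return err
	}
	return utils.ImportHistory(freshChain, freshDB, dir, "mainnet")
}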

@@ -208,6 +208,8 @@ func init() {
initCommand,
importCommand,
exportCommand,
importHistoryCommand,
exportHistoryCommand,
importPreimagesCommand,
removedbCommand,
dumpCommand,

@@ -19,12 +19,15 @@ package utils
import (
"bufio"
"bytes"
"compress/gzip"
"crypto/sha256"
"errors"
"fmt"
"io"
"os"
"os/signal"
"path"
"runtime"
"strings"
"syscall"
@@ -39,8 +42,10 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/urfave/cli/v2"
)
@@ -228,6 +233,105 @@ func ImportChain(chain *core.BlockChain, fn string) error {
return nil
}
func readList(filename string) ([]string, error) {
b, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
return strings.Split(string(b), "\n"), nil
}
// ImportHistory imports Era1 files containing historical block information,
// starting from genesis.
func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
if chain.CurrentSnapBlock().Number.BitLen() != 0 {
return fmt.Errorf("history import only supported when starting from genesis")
}
entries, err := era.ReadDir(dir, network)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
checksums, err := readList(path.Join(dir, "checksums.txt"))
if err != nil {
return fmt.Errorf("unable to read checksums.txt: %w", err)
}
if len(checksums) != len(entries) {
return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
}
var (
start = time.Now()
reported = time.Now()
imported = 0
forker = core.NewForkChoice(chain, nil)
h = sha256.New()
buf = bytes.NewBuffer(nil)
)
for i, filename := range entries {
err := func() error {
f, err := os.Open(path.Join(dir, filename))
if err != nil {
return fmt.Errorf("unable to open era: %w", err)
}
defer f.Close()
// Validate checksum.
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to recalculate checksum: %w", err)
}
if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
}
h.Reset()
buf.Reset()
// Import all block data from Era1.
e, err := era.From(f)
if err != nil {
return fmt.Errorf("error opening era: %w", err)
}
it, err := era.NewIterator(e)
if err != nil {
return fmt.Errorf("error making era reader: %w", err)
}
for it.Next() {
block, err := it.Block()
if err != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
if block.Number().BitLen() == 0 {
continue // skip genesis
}
receipts, err := it.Receipts()
if err != nil {
return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
}
if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
} else if status != core.CanonStatTy {
return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
}
// Write all imported blocks directly to the ancient store (no ancient limit).
if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, ^uint64(0)); err != nil {
return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
}
imported += 1
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
imported = 0
reported = time.Now()
}
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
head := chain.CurrentBlock()
for i, block := range blocks {
@@ -297,6 +401,93 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
return nil
}
// ExportHistory exports blockchain history into the specified directory,
// following the Era format.
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
log.Info("Exporting blockchain history", "dir", dir)
if head := bc.CurrentBlock().Number.Uint64(); head < last {
log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
last = head
}
network := "unknown"
if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
network = name
}
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return fmt.Errorf("error creating output directory: %w", err)
}
var (
start = time.Now()
reported = time.Now()
h = sha256.New()
buf = bytes.NewBuffer(nil)
checksums []string
)
for i := first; i <= last; i += step {
err := func() error {
filename := path.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("could not create era file: %w", err)
}
defer f.Close()
w := era.NewBuilder(f)
for j := uint64(0); j < step && j <= last-i; j++ {
var (
n = i + j
block = bc.GetBlockByNumber(n)
)
if block == nil {
return fmt.Errorf("export failed on #%d: not found", n)
}
receipts := bc.GetReceiptsByHash(block.Hash())
if receipts == nil {
return fmt.Errorf("export failed on #%d: receipts not found", n)
}
td := bc.GetTd(block.Hash(), block.NumberU64())
if td == nil {
return fmt.Errorf("export failed on #%d: total difficulty not found", n)
}
if err := w.Add(block, receipts, td); err != nil {
return err
}
}
root, err := w.Finalize()
if err != nil {
return fmt.Errorf("export failed to finalize %d: %w", step/i, err)
}
// Set correct filename with root.
if err := os.Rename(filename, path.Join(dir, era.Filename(network, int(i/step), root))); err != nil {
return fmt.Errorf("error renaming era1 file: %w", err)
}
// Compute checksum of entire Era1.
if _, err := f.Seek(0, io.SeekStart); err != nil {
return err
}
if _, err := io.Copy(h, f); err != nil {
return fmt.Errorf("unable to calculate checksum: %w", err)
}
checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
h.Reset()
buf.Reset()
return nil
}()
if err != nil {
return err
}
if time.Since(reported) >= 8*time.Second {
log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
}
if err := os.WriteFile(path.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm); err != nil {
return fmt.Errorf("error writing checksums file: %w", err)
}
log.Info("Exported blockchain to", "dir", dir)
return nil
}
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's a part of the deprecated functionality, should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
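
ExportHistory writes a checksums.txt next to the era1 files, and ImportHistory rejects any archive whose SHA-256 differs from the recorded digest. The check in isolation, as a sketch (the helper name is made up):

package sketch

import (
	"crypto/sha256"
	"io"
	"os"

	"github.com/ethereum/go-ethereum/common"
)

// checksumMatches recomputes the SHA-256 of an era1 file and compares it to
// the hex digest recorded on the corresponding line of checksums.txt.
func checksumMatches(filename, want string) (bool, error) {
	f, err := os.Open(filename)
	if err != nil {
		return false, err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	return common.BytesToHash(h.Sum(nil)).Hex() == want, nil
}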

cmd/utils/history_test.go (new file)
@@ -0,0 +1,184 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"bytes"
"crypto/sha256"
"io"
"math/big"
"os"
"path"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
var (
count uint64 = 128
step uint64 = 16
)
func TestHistoryImportAndExport(t *testing.T) {
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}},
}
signer = types.LatestSigner(genesis.Config)
)
// Generate chain.
db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) {
if i == 0 {
return
}
tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
ChainID: genesis.Config.ChainID,
Nonce: uint64(i - 1),
GasTipCap: common.Big0,
GasFeeCap: g.PrevBlock(0).BaseFee(),
Gas: 50000,
To: &common.Address{0xaa},
Value: big.NewInt(int64(i)),
Data: nil,
AccessList: nil,
})
if err != nil {
t.Fatalf("error creating tx: %v", err)
}
g.AddTx(tx)
})
// Initialize BlockChain.
chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
}
if _, err := chain.InsertChain(blocks); err != nil {
t.Fatalf("error insterting chain: %v", err)
}
// Make temp directory for era files.
dir, err := os.MkdirTemp("", "history-export-test")
if err != nil {
t.Fatalf("error creating temp test directory: %v", err)
}
defer os.RemoveAll(dir)
// Export history to temp directory.
if err := ExportHistory(chain, dir, 0, count, step); err != nil {
t.Fatalf("error exporting history: %v", err)
}
// Read checksums.
b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
if err != nil {
t.Fatalf("failed to read checksums: %v", err)
}
checksums := strings.Split(string(b), "\n")
// Verify each Era.
entries, _ := era.ReadDir(dir, "mainnet")
for i, filename := range entries {
func() {
f, err := os.Open(path.Join(dir, filename))
if err != nil {
t.Fatalf("error opening era file: %v", err)
}
var (
h = sha256.New()
buf = bytes.NewBuffer(nil)
)
if _, err := io.Copy(h, f); err != nil {
t.Fatalf("unable to recalculate checksum: %v", err)
}
if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
}
e, err := era.From(f)
if err != nil {
t.Fatalf("error opening era: %v", err)
}
defer e.Close()
it, err := era.NewIterator(e)
if err != nil {
t.Fatalf("error making era reader: %v", err)
}
for j := 0; it.Next(); j++ {
n := i*int(step) + j
if it.Error() != nil {
t.Fatalf("error reading block entry %d: %v", n, err)
}
block, receipts, err := it.BlockAndReceipts()
if err != nil {
t.Fatalf("error reading block entry %d: %v", n, err)
}
want := chain.GetBlockByNumber(uint64(n))
if want, got := uint64(n), block.NumberU64(); want != got {
t.Fatalf("blocks out of order: want %d, got %d", want, got)
}
if want.Hash() != block.Hash() {
t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex())
}
if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() {
t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got)
}
if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() {
t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got)
}
if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() {
t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got)
}
}
}()
}
// Now import Era.
freezer := t.TempDir()
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
if err != nil {
panic(err)
}
t.Cleanup(func() {
db2.Close()
})
genesis.MustCommit(db2, trie.NewDatabase(db2, trie.HashDefaults))
imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
}
if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil {
t.Fatalf("failed to import chain: %v", err)
}
if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
}
}

@@ -410,6 +410,11 @@ func (bc *BlockChain) TrieDB() *trie.Database {
return bc.triedb
}
// HeaderChain returns the underlying header chain.
func (bc *BlockChain) HeaderChain() *HeaderChain {
return bc.hc
}
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))

go.mod
@@ -22,6 +22,7 @@ require (
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
github.com/ethereum/c-kzg-4844 v0.4.0
github.com/fatih/color v1.13.0
github.com/ferranbt/fastssz v0.1.2
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
github.com/fjl/memsize v0.0.2
github.com/fsnotify/fsnotify v1.6.0
@@ -114,10 +115,12 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect

go.sum
@@ -187,6 +187,8 @@ github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -399,6 +401,9 @@ github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -446,6 +451,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
@@ -523,6 +530,7 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=

@@ -0,0 +1,90 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
ssz "github.com/ferranbt/fastssz"
)
// ComputeAccumulator calculates the SSZ hash tree root of the Era1
// accumulator of header records.
func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, error) {
if len(hashes) != len(tds) {
return common.Hash{}, fmt.Errorf("must have equal number hashes as td values")
}
if len(hashes) > MaxEra1Size {
return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxEra1Size)
}
hh := ssz.NewHasher()
for i := range hashes {
rec := headerRecord{hashes[i], tds[i]}
root, err := rec.HashTreeRoot()
if err != nil {
return common.Hash{}, err
}
hh.Append(root[:])
}
hh.MerkleizeWithMixin(0, uint64(len(hashes)), uint64(MaxEra1Size))
return hh.HashRoot()
}
// headerRecord is an individual record for a historical header.
//
// See https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
// for more information.
type headerRecord struct {
Hash common.Hash
TotalDifficulty *big.Int
}
// GetTree completes the ssz.HashRoot interface, but is unused.
func (h *headerRecord) GetTree() (*ssz.Node, error) {
return nil, nil
}
// HashTreeRoot ssz hashes the headerRecord object.
func (h *headerRecord) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(h)
}
// HashTreeRootWith ssz hashes the headerRecord object with a hasher.
func (h *headerRecord) HashTreeRootWith(hh ssz.HashWalker) (err error) {
hh.PutBytes(h.Hash[:])
td := bigToBytes32(h.TotalDifficulty)
hh.PutBytes(td[:])
hh.Merkleize(0)
return
}
// bigToBytes32 converts a big.Int into a little-endian 32-byte array.
func bigToBytes32(n *big.Int) (b [32]byte) {
n.FillBytes(b[:])
reverseOrder(b[:])
return
}
// reverseOrder reverses the byte order of a 32-byte slice.
func reverseOrder(b []byte) []byte {
for i := 0; i < 16; i++ {
b[i], b[32-i-1] = b[32-i-1], b[i]
}
return b
}
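
A minimal sketch of calling ComputeAccumulator directly, pairing two made-up block hashes with their cumulative total difficulties, to show the shape of the inputs:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/internal/era"
)

func main() {
	// Made-up header records: block hash plus the total difficulty at that block.
	hashes := []common.Hash{
		common.HexToHash("0x01"),
		common.HexToHash("0x02"),
	}
	tds := []*big.Int{big.NewInt(100), big.NewInt(200)}

	root, err := era.ComputeAccumulator(hashes, tds)
	if err != nil {
		panic(err)
	}
	fmt.Println("accumulator root:", root.Hex())
}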

internal/era/builder.go (new file)
@@ -0,0 +1,228 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
)
// Builder is used to create Era1 archives of block data.
//
// Era1 files are themselves e2store files. For more information on this format,
// see https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md.
//
// The overall structure of an Era1 file closely follows that of an Era file, which
// contains consensus-layer data (and, as a byproduct, execution-layer data after the merge).
//
// The structure can be summarized through this definition:
//
// era1 := Version | block-tuple* | other-entries* | Accumulator | BlockIndex
// block-tuple := CompressedHeader | CompressedBody | CompressedReceipts | TotalDifficulty
//
// Each basic element is its own entry:
//
// Version = { type: [0x65, 0x32], data: nil }
// CompressedHeader = { type: [0x03, 0x00], data: snappyFramed(rlp(header)) }
// CompressedBody = { type: [0x04, 0x00], data: snappyFramed(rlp(body)) }
// CompressedReceipts = { type: [0x05, 0x00], data: snappyFramed(rlp(receipts)) }
// TotalDifficulty = { type: [0x06, 0x00], data: uint256(header.total_difficulty) }
// Accumulator = { type: [0x07, 0x00], data: accumulator-root }
// BlockIndex = { type: [0x32, 0x66], data: block-index }
//
// Accumulator is computed by constructing an SSZ list of header-records of length at most
// 8192 and then calculating the hash_tree_root of that list.
//
// header-record := { block-hash: Bytes32, total-difficulty: Uint256 }
// accumulator := hash_tree_root([]header-record, 8192)
//
// BlockIndex stores relative offsets to each compressed block entry. The
// format is:
//
// block-index := starting-number | index | index | index ... | count
//
// starting-number is the first block number in the archive. Every index is
// defined relative to the index's location in the file. The total number of
// block entries in the file is recorded in count.
//
// Due to the accumulator size limit of 8192, the maximum number of blocks in
// an Era1 batch is also 8192.
type Builder struct {
w *e2store.Writer
startNum *uint64
startTd *big.Int
indexes []uint64
hashes []common.Hash
tds []*big.Int
written int
buf *bytes.Buffer
snappy *snappy.Writer
}
// NewBuilder returns a new Builder instance.
func NewBuilder(w io.Writer) *Builder {
buf := bytes.NewBuffer(nil)
return &Builder{
w: e2store.NewWriter(w),
buf: buf,
snappy: snappy.NewBufferedWriter(buf),
}
}
// Add writes a compressed block entry and compressed receipts entry to the
// underlying e2store file.
func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int) error {
eh, err := rlp.EncodeToBytes(block.Header())
if err != nil {
return err
}
eb, err := rlp.EncodeToBytes(block.Body())
if err != nil {
return err
}
er, err := rlp.EncodeToBytes(receipts)
if err != nil {
return err
}
return b.AddRLP(eh, eb, er, block.NumberU64(), block.Hash(), td, block.Difficulty())
}
// AddRLP writes a compressed block entry and compressed receipts entry to the
// underlying e2store file.
func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error {
// Write Era1 version entry before first block.
if b.startNum == nil {
if err := writeVersion(b.w); err != nil {
return err
}
n := number
b.startNum = &n
b.startTd = new(big.Int).Sub(td, difficulty)
}
if len(b.indexes) >= MaxEra1Size {
return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size)
}
b.indexes = append(b.indexes, uint64(b.written))
b.hashes = append(b.hashes, hash)
b.tds = append(b.tds, td)
// Write block data.
if err := b.snappyWrite(TypeCompressedHeader, header); err != nil {
return err
}
if err := b.snappyWrite(TypeCompressedBody, body); err != nil {
return err
}
if err := b.snappyWrite(TypeCompressedReceipts, receipts); err != nil {
return err
}
// Also write total difficulty, but don't snappy encode.
btd := bigToBytes32(td)
n, err := b.w.Write(TypeTotalDifficulty, btd[:])
b.written += n
if err != nil {
return err
}
return nil
}
// Finalize computes the accumulator and block index values, then writes the
// corresponding e2store entries.
func (b *Builder) Finalize() (common.Hash, error) {
if b.startNum == nil {
return common.Hash{}, fmt.Errorf("finalize called on empty builder")
}
// Compute accumulator root and write entry.
root, err := ComputeAccumulator(b.hashes, b.tds)
if err != nil {
return common.Hash{}, fmt.Errorf("error calculating accumulator root: %w", err)
}
n, err := b.w.Write(TypeAccumulator, root[:])
b.written += n
if err != nil {
return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err)
}
// Get beginning of index entry to calculate block relative offset.
base := int64(b.written + (3 * 8)) // skip e2store header (type, length) and start block
// Construct block index. Detailed format described in Builder
// documentation, but it is essentially encoded as:
// "start | index | index | ... | count"
var (
count = len(b.indexes)
index = make([]byte, 16+count*8)
)
binary.LittleEndian.PutUint64(index, *b.startNum)
// Each offset is relative from the position it is encoded in the
// index. This means that even if the same block was to be included in
// the index twice (this would be invalid anyways), the relative offset
// would be different. The idea with this is that after reading a
// relative offset, the corresponding block can be quickly read by
// performing a seek relative to the current position.
for i, offset := range b.indexes {
relative := int64(offset) - (base + int64(i)*8)
binary.LittleEndian.PutUint64(index[8+i*8:], uint64(relative))
}
binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count))
// Finally, write the block index entry.
if _, err := b.w.Write(TypeBlockIndex, index); err != nil {
return common.Hash{}, fmt.Errorf("unable to write block index: %w", err)
}
return root, nil
}
// snappyWrite is a small helper to take care of snappy encoding and writing an e2store entry.
func (b *Builder) snappyWrite(typ uint16, in []byte) error {
var (
buf = b.buf
s = b.snappy
)
buf.Reset()
s.Reset(buf)
if _, err := b.snappy.Write(in); err != nil {
return fmt.Errorf("error snappy encoding: %w", err)
}
if err := s.Flush(); err != nil {
return fmt.Errorf("error flushing snappy encoding: %w", err)
}
n, err := b.w.Write(typ, b.buf.Bytes())
b.written += n
if err != nil {
return fmt.Errorf("error writing e2store entry: %w", err)
}
return nil
}
// writeVersion writes a version entry to e2store.
func writeVersion(w *e2store.Writer) error {
_, err := w.Write(TypeVersion, nil)
return err
}
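
A sketch of driving the Builder directly, writing a single block tuple into an in-memory buffer and finalizing the archive; ExportHistory above is the production caller that feeds it real chain data.

package sketch

import (
	"bytes"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/internal/era"
)

// buildOne returns a one-block Era1 archive for the given block, receipts and
// total difficulty (helper name and single-block usage are illustrative only).
func buildOne(block *types.Block, receipts types.Receipts, td *big.Int) ([]byte, error) {
	buf := bytes.NewBuffer(nil)
	b := era.NewBuilder(buf)
	if err := b.Add(block, receipts, td); err != nil {
		return nil, err
	}
	// Finalize writes the accumulator and block index entries.
	if _, err := b.Finalize(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}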

@@ -0,0 +1,220 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package e2store
import (
"encoding/binary"
"fmt"
"io"
)
const (
headerSize = 8
valueSizeLimit = 1024 * 1024 * 50
)
// Entry is a variable-length-data record in an e2store.
type Entry struct {
Type uint16
Value []byte
}
// Writer writes entries using e2store encoding.
// For more information on this format, see:
// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
type Writer struct {
w io.Writer
}
// NewWriter returns a new Writer that writes to w.
func NewWriter(w io.Writer) *Writer {
return &Writer{w}
}
// Write writes a single e2store entry to w.
// An entry is encoded in a type-length-value format. The first 8 bytes of the
// record store the type (2 bytes), the length (4 bytes), and some reserved
// data (2 bytes). The remaining bytes store b.
func (w *Writer) Write(typ uint16, b []byte) (int, error) {
buf := make([]byte, headerSize)
binary.LittleEndian.PutUint16(buf, typ)
binary.LittleEndian.PutUint32(buf[2:], uint32(len(b)))
// Write header.
if n, err := w.w.Write(buf); err != nil {
return n, err
}
// Write value, return combined write size.
n, err := w.w.Write(b)
return n + headerSize, err
}
// A Reader reads entries from an e2store-encoded file.
// For more information on this format, see
// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
type Reader struct {
r io.ReaderAt
offset int64
}
// NewReader returns a new Reader that reads from r.
func NewReader(r io.ReaderAt) *Reader {
return &Reader{r, 0}
}
// Read reads one Entry from r.
func (r *Reader) Read() (*Entry, error) {
var e Entry
n, err := r.ReadAt(&e, r.offset)
if err != nil {
return nil, err
}
r.offset += int64(n)
return &e, nil
}
// ReadAt reads one Entry from r at the specified offset.
func (r *Reader) ReadAt(entry *Entry, off int64) (int, error) {
typ, length, err := r.ReadMetadataAt(off)
if err != nil {
return 0, err
}
entry.Type = typ
// Check length bounds.
if length > valueSizeLimit {
return headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
}
if length == 0 {
return headerSize, nil
}
// Read value.
val := make([]byte, length)
if n, err := r.r.ReadAt(val, off+headerSize); err != nil {
n += headerSize
// An entry with a non-zero length should not return EOF when
// reading the value.
if err == io.EOF {
return n, io.ErrUnexpectedEOF
}
return n, err
}
entry.Value = val
return int(headerSize + length), nil
}
// ReaderAt returns an io.Reader delivering value data for the entry at
// the specified offset. If the entry type does not match the expected type, an
// error is returned.
func (r *Reader) ReaderAt(expectedType uint16, off int64) (io.Reader, int, error) {
typ, length, err := r.ReadMetadataAt(off)
if err != nil {
return nil, headerSize, err
}
if typ != expectedType {
return nil, headerSize, fmt.Errorf("wrong type, want %d have %d", expectedType, typ)
}
if length > valueSizeLimit {
return nil, headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
}
return io.NewSectionReader(r.r, off+headerSize, int64(length)), headerSize + int(length), nil
}
// LengthAt reads the header at off and returns the total length of the entry,
// including header.
func (r *Reader) LengthAt(off int64) (int64, error) {
_, length, err := r.ReadMetadataAt(off)
if err != nil {
return 0, err
}
return int64(length) + headerSize, nil
}
// ReadMetadataAt reads the header metadata at the given offset.
func (r *Reader) ReadMetadataAt(off int64) (typ uint16, length uint32, err error) {
b := make([]byte, headerSize)
if n, err := r.r.ReadAt(b, off); err != nil {
if err == io.EOF && n > 0 {
return 0, 0, io.ErrUnexpectedEOF
}
return 0, 0, err
}
typ = binary.LittleEndian.Uint16(b)
length = binary.LittleEndian.Uint32(b[2:])
// Check reserved bytes of header.
if b[6] != 0 || b[7] != 0 {
return 0, 0, fmt.Errorf("reserved bytes are non-zero")
}
return typ, length, nil
}
// Find returns the first entry with the matching type.
func (r *Reader) Find(want uint16) (*Entry, error) {
var (
off int64
typ uint16
length uint32
err error
)
for {
typ, length, err = r.ReadMetadataAt(off)
if err == io.EOF {
return nil, io.EOF
} else if err != nil {
return nil, err
}
if typ == want {
var e Entry
if _, err := r.ReadAt(&e, off); err != nil {
return nil, err
}
return &e, nil
}
off += int64(headerSize + length)
}
}
// FindAll returns all entries with the matching type.
func (r *Reader) FindAll(want uint16) ([]*Entry, error) {
var (
off int64
typ uint16
length uint32
entries []*Entry
err error
)
for {
typ, length, err = r.ReadMetadataAt(off)
if err == io.EOF {
return entries, nil
} else if err != nil {
return entries, err
}
if typ == want {
e := new(Entry)
if _, err := r.ReadAt(e, off); err != nil {
return entries, err
}
entries = append(entries, e)
}
off += int64(headerSize + length)
}
}
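
The header described above is 8 bytes: a little-endian uint16 type, a little-endian uint32 length, and two reserved zero bytes. A tiny round-trip sketch, matching the test vectors in the test file below:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/internal/era/e2store"
)

func main() {
	var buf bytes.Buffer
	w := e2store.NewWriter(&buf)
	// Entry type 42 (0x2a) with value 0xbeef encodes as 2a00 02000000 0000 beef.
	if _, err := w.Write(42, []byte{0xbe, 0xef}); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes()) // 2a00020000000000beef

	r := e2store.NewReader(bytes.NewReader(buf.Bytes()))
	e, err := r.Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(e.Type, e.Value) // 42 [190 239]
}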

@@ -0,0 +1,150 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package e2store
import (
"bytes"
"fmt"
"io"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestEncode(t *testing.T) {
for _, test := range []struct {
entries []Entry
want string
name string
}{
{
name: "emptyEntry",
entries: []Entry{{0xffff, nil}},
want: "ffff000000000000",
},
{
name: "beef",
entries: []Entry{{42, common.Hex2Bytes("beef")}},
want: "2a00020000000000beef",
},
{
name: "twoEntries",
entries: []Entry{
{42, common.Hex2Bytes("beef")},
{9, common.Hex2Bytes("abcdabcd")},
},
want: "2a00020000000000beef0900040000000000abcdabcd",
},
} {
tt := test
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
var (
b = bytes.NewBuffer(nil)
w = NewWriter(b)
)
for _, e := range tt.entries {
if _, err := w.Write(e.Type, e.Value); err != nil {
t.Fatalf("encoding error: %v", err)
}
}
if want, have := common.FromHex(tt.want), b.Bytes(); !bytes.Equal(want, have) {
t.Fatalf("encoding mismatch (want %x, have %x", want, have)
}
r := NewReader(bytes.NewReader(b.Bytes()))
for _, want := range tt.entries {
have, err := r.Read()
if err != nil {
t.Fatalf("decoding error: %v", err)
}
if have.Type != want.Type {
t.Fatalf("decoded entry does type mismatch (want %v, got %v)", want.Type, have.Type)
}
if !bytes.Equal(have.Value, want.Value) {
t.Fatalf("decoded entry does not match (want %#x, got %#x)", want.Value, have.Value)
}
}
})
}
}
func TestDecode(t *testing.T) {
for i, tt := range []struct {
have string
err error
}{
{ // basic valid decoding
have: "ffff000000000000",
},
{ // basic invalid decoding
have: "ffff000000000001",
err: fmt.Errorf("reserved bytes are non-zero"),
},
{ // no more entries to read, returns EOF
have: "",
err: io.EOF,
},
{ // malformed type
have: "bad",
err: io.ErrUnexpectedEOF,
},
{ // malformed length
have: "badbeef",
err: io.ErrUnexpectedEOF,
},
{ // specified length longer than actual value
have: "beef010000000000",
err: io.ErrUnexpectedEOF,
},
} {
r := NewReader(bytes.NewReader(common.FromHex(tt.have)))
_, err := r.Read()
if tt.err == nil && err != nil {
t.Fatalf("test %d, expected no error, got %v", i, err)
}
if tt.err != nil && err == nil {
t.Fatalf("test %d, expected error %v, got none", i, tt.err)
}
if tt.err != nil && err != nil && err.Error() != tt.err.Error() {
t.Fatalf("test %d, expected error %v, got %v", i, tt.err, err)
}
}
}
func FuzzCodec(f *testing.F) {
f.Fuzz(func(t *testing.T, input []byte) {
r := NewReader(bytes.NewReader(input))
entry, err := r.Read()
if err != nil {
return
}
var (
b = bytes.NewBuffer(nil)
w = NewWriter(b)
)
w.Write(entry.Type, entry.Value)
output := b.Bytes()
// Only care about the input that was actually consumed
input = input[:r.offset]
if !bytes.Equal(input, output) {
t.Fatalf("decode-encode mismatch, input %#x output %#x", input, output)
}
})
}

internal/era/era.go (new file)
@@ -0,0 +1,282 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"encoding/binary"
"fmt"
"io"
"math/big"
"os"
"path"
"strconv"
"strings"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era/e2store"
"github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy"
)
var (
TypeVersion uint16 = 0x3265
TypeCompressedHeader uint16 = 0x03
TypeCompressedBody uint16 = 0x04
TypeCompressedReceipts uint16 = 0x05
TypeTotalDifficulty uint16 = 0x06
TypeAccumulator uint16 = 0x07
TypeBlockIndex uint16 = 0x3266
MaxEra1Size = 8192
)
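// Entries for each block are stored consecutively as a compressed header,
// compressed body, compressed receipts, and total difficulty, and a block
// index at the end of the file enables random access (see GetBlockByNumber
// and readMetadata below, and the iterator in iterator.go).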
// Filename returns a recognizable Era1-formatted file name for the specified
// epoch and network.
func Filename(network string, epoch int, root common.Hash) string {
return fmt.Sprintf("%s-%05d-%s.era1", network, epoch, root.Hex()[2:10])
}
// ReadDir reads all the era1 files in a directory for a given network.
// Format: <network>-<epoch>-<hexroot>.era1
func ReadDir(dir, network string) ([]string, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return nil, fmt.Errorf("error reading directory %s: %w", dir, err)
}
var (
next = uint64(0)
eras []string
)
for _, entry := range entries {
if path.Ext(entry.Name()) != ".era1" {
continue
}
parts := strings.Split(entry.Name(), "-")
if len(parts) != 3 || parts[0] != network {
// invalid era1 filename, skip
continue
}
if epoch, err := strconv.ParseUint(parts[1], 10, 64); err != nil {
return nil, fmt.Errorf("malformed era1 filename: %s", entry.Name())
} else if epoch != next {
return nil, fmt.Errorf("missing epoch %d", next)
}
next += 1
eras = append(eras, entry.Name())
}
return eras, nil
}
type ReadAtSeekCloser interface {
io.ReaderAt
io.Seeker
io.Closer
}
// Era reads an Era1 file.
type Era struct {
f ReadAtSeekCloser // backing era1 file
s *e2store.Reader // e2store reader over f
m metadata // start, count, length info
mu *sync.Mutex // lock for buf
buf [8]byte // buffer reading entry offsets
}
// From returns an Era backed by f.
func From(f ReadAtSeekCloser) (*Era, error) {
m, err := readMetadata(f)
if err != nil {
return nil, err
}
return &Era{
f: f,
s: e2store.NewReader(f),
m: m,
mu: new(sync.Mutex),
}, nil
}
// Open returns an Era backed by the given filename.
func Open(filename string) (*Era, error) {
f, err := os.Open(filename)
if err != nil {
return nil, err
}
return From(f)
}
func (e *Era) Close() error {
return e.f.Close()
}
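// GetBlockByNumber returns the block with the given number, if it is
// contained in the Era1 file.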
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
if e.m.start > num || e.m.start+e.m.count <= num {
return nil, fmt.Errorf("out-of-bounds")
}
off, err := e.readOffset(num)
if err != nil {
return nil, err
}
r, n, err := newSnappyReader(e.s, TypeCompressedHeader, off)
if err != nil {
return nil, err
}
var header types.Header
if err := rlp.Decode(r, &header); err != nil {
return nil, err
}
off += n
r, _, err = newSnappyReader(e.s, TypeCompressedBody, off)
if err != nil {
return nil, err
}
var body types.Body
if err := rlp.Decode(r, &body); err != nil {
return nil, err
}
return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
}
// Accumulator reads the accumulator entry in the Era1 file.
func (e *Era) Accumulator() (common.Hash, error) {
entry, err := e.s.Find(TypeAccumulator)
if err != nil {
return common.Hash{}, err
}
return common.BytesToHash(entry.Value), nil
}
// InitialTD returns initial total difficulty before the difficulty of the
// first block of the Era1 is applied.
func (e *Era) InitialTD() (*big.Int, error) {
var (
r io.Reader
header types.Header
rawTd []byte
n int64
off int64
err error
)
// Read first header.
if off, err = e.readOffset(e.m.start); err != nil {
return nil, err
}
if r, n, err = newSnappyReader(e.s, TypeCompressedHeader, off); err != nil {
return nil, err
}
if err := rlp.Decode(r, &header); err != nil {
return nil, err
}
off += n
// Skip over the next two records (body and receipts).
for i := 0; i < 2; i++ {
length, err := e.s.LengthAt(off)
if err != nil {
return nil, err
}
off += length
}
// Read total difficulty after first block.
if r, _, err = e.s.ReaderAt(TypeTotalDifficulty, off); err != nil {
return nil, err
}
rawTd, err = io.ReadAll(r)
if err != nil {
return nil, err
}
td := new(big.Int).SetBytes(reverseOrder(rawTd))
return td.Sub(td, header.Difficulty), nil
}
// Start returns the listed start block.
func (e *Era) Start() uint64 {
return e.m.start
}
// Count returns the total number of blocks in the Era1.
func (e *Era) Count() uint64 {
return e.m.count
}
// readOffset reads a specific block's offset from the block index. The value n
// is the absolute block number desired.
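// The block index is stored at the end of the file as
//
//	start-number(8) | offset(8) * count | count(8)
//
// where each offset is a 64-bit little-endian value relative to the position
// of the index entry that stores it plus eight bytes (see readMetadata below
// for how start and count are recovered).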
func (e *Era) readOffset(n uint64) (int64, error) {
var (
firstIndex = -8 - int64(e.m.count)*8 // size of count - index entries
indexOffset = int64(n-e.m.start) * 8 // desired index * size of indexes
offOffset = e.m.length + firstIndex + indexOffset // offset of block offset
)
e.mu.Lock()
defer e.mu.Unlock()
clearBuffer(e.buf[:])
if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
return 0, err
}
// Since the block offset is relative to the position of its index entry
// plus the size of the index value (8 bytes), add it to offOffset to get
// the block's absolute offset.
return offOffset + 8 + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}
// newSnappyReader returns a snappy.Reader for the e2store entry value at off.
func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
r, n, err := e.ReaderAt(expectedType, off)
if err != nil {
return nil, 0, err
}
return snappy.NewReader(r), int64(n), err
}
// clearBuffer zeroes out the buffer.
func clearBuffer(buf []byte) {
for i := 0; i < len(buf); i++ {
buf[i] = 0
}
}
// metadata wraps the metadata in the block index.
type metadata struct {
start uint64
count uint64
length int64
}
// readMetadata reads the metadata stored in an Era1 file's block index.
func readMetadata(f ReadAtSeekCloser) (m metadata, err error) {
// Determine length of reader.
if m.length, err = f.Seek(0, io.SeekEnd); err != nil {
return
}
b := make([]byte, 16)
// Read count. It's the last 8 bytes of the file.
if _, err = f.ReadAt(b[:8], m.length-8); err != nil {
return
}
m.count = binary.LittleEndian.Uint64(b)
// Read start. It's at the offset -sizeof(m.count) -
// count*sizeof(indexEntry) - sizeof(m.start)
if _, err = f.ReadAt(b[8:], m.length-16-int64(m.count*8)); err != nil {
return
}
m.start = binary.LittleEndian.Uint64(b[8:])
return
}
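For orientation, a minimal sketch of how the Era type above is consumed. The filename is a placeholder, error handling is abbreviated, and since internal/era is an internal package this only builds from within the go-ethereum module:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/internal/era"
)

func main() {
	// Placeholder filename for illustration; real names follow
	// <network>-<epoch>-<hexroot>.era1 as produced by Filename.
	e, err := era.Open("mainnet-00000-5ec1ffb8.era1")
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	fmt.Printf("era covers blocks %d..%d\n", e.Start(), e.Start()+e.Count()-1)

	// Random access via the block index.
	block, err := e.GetBlockByNumber(e.Start())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("first block hash:", block.Hash())
}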

142
internal/era/era_test.go Normal file

@ -0,0 +1,142 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"bytes"
"io"
"math/big"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
)
type testchain struct {
headers [][]byte
bodies [][]byte
receipts [][]byte
tds []*big.Int
}
func TestEra1Builder(t *testing.T) {
// Create a temp file to back the Era1.
f, err := os.CreateTemp("", "era1-test")
if err != nil {
t.Fatalf("error creating temp file: %v", err)
}
defer f.Close()
var (
builder = NewBuilder(f)
chain = testchain{}
)
for i := 0; i < 128; i++ {
chain.headers = append(chain.headers, []byte{byte('h'), byte(i)})
chain.bodies = append(chain.bodies, []byte{byte('b'), byte(i)})
chain.receipts = append(chain.receipts, []byte{byte('r'), byte(i)})
chain.tds = append(chain.tds, big.NewInt(int64(i)))
}
// Write blocks to Era1.
for i := 0; i < len(chain.headers); i++ {
var (
header = chain.headers[i]
body = chain.bodies[i]
receipts = chain.receipts[i]
hash = common.Hash{byte(i)}
td = chain.tds[i]
)
if err = builder.AddRLP(header, body, receipts, uint64(i), hash, td, big.NewInt(1)); err != nil {
t.Fatalf("error adding entry: %v", err)
}
}
// Finalize Era1.
if _, err := builder.Finalize(); err != nil {
t.Fatalf("error finalizing era1: %v", err)
}
// Verify Era1 contents.
e, err := Open(f.Name())
if err != nil {
t.Fatalf("failed to open era: %v", err)
}
it, err := NewRawIterator(e)
if err != nil {
t.Fatalf("failed to make iterator: %s", err)
}
for i := uint64(0); i < uint64(len(chain.headers)); i++ {
if !it.Next() {
t.Fatalf("expected more entries")
}
if it.Error() != nil {
t.Fatalf("unexpected error %v", it.Error())
}
// Check headers.
header, err := io.ReadAll(it.Header)
if err != nil {
t.Fatalf("error reading header: %v", err)
}
if !bytes.Equal(header, chain.headers[i]) {
t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], header)
}
// Check bodies.
body, err := io.ReadAll(it.Body)
if err != nil {
t.Fatalf("error reading body: %v", err)
}
if !bytes.Equal(body, chain.bodies[i]) {
t.Fatalf("mismatched body: want %s, got %s", chain.bodies[i], body)
}
// Check receipts.
receipts, err := io.ReadAll(it.Receipts)
if err != nil {
t.Fatalf("error reading receipts: %v", err)
}
if !bytes.Equal(receipts, chain.receipts[i]) {
t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], receipts)
}
// Check total difficulty.
rawTd, err := io.ReadAll(it.TotalDifficulty)
if err != nil {
t.Fatalf("error reading td: %v", err)
}
td := new(big.Int).SetBytes(reverseOrder(rawTd))
if td.Cmp(chain.tds[i]) != 0 {
t.Fatalf("mismatched tds: want %s, got %s", chain.tds[i], td)
}
}
}
func TestEraFilename(t *testing.T) {
for i, tt := range []struct {
network string
epoch int
root common.Hash
expected string
}{
{"mainnet", 1, common.Hash{1}, "mainnet-00001-01000000.era1"},
{"goerli", 99999, common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000"), "goerli-99999-deadbeef.era1"},
} {
got := Filename(tt.network, tt.epoch, tt.root)
if tt.expected != got {
t.Errorf("test %d: invalid filename: want %s, got %s", i, tt.expected, got)
}
}
}

197
internal/era/iterator.go Normal file

@ -0,0 +1,197 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era
import (
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// Iterator wraps RawIterator and returns decoded Era1 entries.
type Iterator struct {
inner *RawIterator
}
// NewIterator returns a new Iterator instance. Next must be immediately
// called on new iterators to load the first item.
func NewIterator(e *Era) (*Iterator, error) {
inner, err := NewRawIterator(e)
if err != nil {
return nil, err
}
return &Iterator{inner}, nil
}
// Next moves the iterator to the next block entry. It returns false when all
// items have been read or an error has halted its progress. Block, Receipts,
// and BlockAndReceipts should no longer be called after false is returned.
func (it *Iterator) Next() bool {
return it.inner.Next()
}
// Number returns the block number at the iterator's current position.
func (it *Iterator) Number() uint64 {
return it.inner.next - 1
}
// Error returns the error status of the iterator. It should be called before
// reading from any of the iterator's values.
func (it *Iterator) Error() error {
return it.inner.Error()
}
// Block returns the block for the iterator's current position.
func (it *Iterator) Block() (*types.Block, error) {
if it.inner.Header == nil || it.inner.Body == nil {
return nil, fmt.Errorf("header and body must be non-nil")
}
var (
header types.Header
body types.Body
)
if err := rlp.Decode(it.inner.Header, &header); err != nil {
return nil, err
}
if err := rlp.Decode(it.inner.Body, &body); err != nil {
return nil, err
}
return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
}
// Receipts returns the receipts for the iterator's current position.
func (it *Iterator) Receipts() (types.Receipts, error) {
if it.inner.Receipts == nil {
return nil, fmt.Errorf("receipts must be non-nil")
}
var receipts types.Receipts
err := rlp.Decode(it.inner.Receipts, &receipts)
return receipts, err
}
// BlockAndReceipts returns the block and receipts for the iterator's current
// position.
func (it *Iterator) BlockAndReceipts() (*types.Block, types.Receipts, error) {
b, err := it.Block()
if err != nil {
return nil, nil, err
}
r, err := it.Receipts()
if err != nil {
return nil, nil, err
}
return b, r, nil
}
// TotalDifficulty returns the total difficulty for the iterator's current
// position.
func (it *Iterator) TotalDifficulty() (*big.Int, error) {
td, err := io.ReadAll(it.inner.TotalDifficulty)
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(reverseOrder(td)), nil
}
// RawIterator reads raw RLP-encoded entries from an Era1 file.
type RawIterator struct {
e *Era // backing Era1
next uint64 // next block to read
err error // last error
Header io.Reader
Body io.Reader
Receipts io.Reader
TotalDifficulty io.Reader
}
// NewRawIterator returns a new RawIterator instance. Next must be immediately
// called on new iterators to load the first item.
func NewRawIterator(e *Era) (*RawIterator, error) {
return &RawIterator{
e: e,
next: e.m.start,
}, nil
}
// Next moves the iterator to the next block entry. It returns false when all
// items have been read or an error has halted its progress. Header, Body,
// Receipts, and TotalDifficulty are set to nil when false is returned or an
// error is encountered, and should therefore no longer be read from.
func (it *RawIterator) Next() bool {
// Clear old errors.
it.err = nil
if it.e.m.start+it.e.m.count <= it.next {
it.clear()
return false
}
off, err := it.e.readOffset(it.next)
if err != nil {
// Error here means block index is corrupted, so don't
// continue.
it.clear()
it.err = err
return false
}
var n int64
if it.Header, n, it.err = newSnappyReader(it.e.s, TypeCompressedHeader, off); it.err != nil {
it.clear()
return true
}
off += n
if it.Body, n, it.err = newSnappyReader(it.e.s, TypeCompressedBody, off); it.err != nil {
it.clear()
return true
}
off += n
if it.Receipts, n, it.err = newSnappyReader(it.e.s, TypeCompressedReceipts, off); it.err != nil {
it.clear()
return true
}
off += n
if it.TotalDifficulty, _, it.err = it.e.s.ReaderAt(TypeTotalDifficulty, off); it.err != nil {
it.clear()
return true
}
it.next += 1
return true
}
// Number returns the block number at the iterator's current position.
func (it *RawIterator) Number() uint64 {
return it.next - 1
}
// Error returns the error status of the iterator. It should be called before
// reading from any of the iterator's values.
func (it *RawIterator) Error() error {
if it.err == io.EOF {
return nil
}
return it.err
}
// clear sets all the outputs to nil.
func (it *RawIterator) clear() {
it.Header = nil
it.Body = nil
it.Receipts = nil
it.TotalDifficulty = nil
}
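And a corresponding sketch for the decoded iterator above, walking every block of an Era1 file. As before, the filename is a placeholder and this only builds in-tree:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/internal/era"
)

func main() {
	// Placeholder filename; see the previous sketch for the naming scheme.
	e, err := era.Open("mainnet-00000-5ec1ffb8.era1")
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	it, err := era.NewIterator(e)
	if err != nil {
		log.Fatal(err)
	}
	for it.Next() {
		block, receipts, err := it.BlockAndReceipts()
		if err != nil {
			log.Fatal(err)
		}
		td, err := it.TotalDifficulty()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("block %d: %d txs, %d receipts, td %s\n",
			it.Number(), len(block.Transactions()), len(receipts), td)
	}
	if err := it.Error(); err != nil {
		log.Fatal(err)
	}
}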