Patch for concurrent iterator & others (onto v1.11.6) #386

Closed
roysc wants to merge 1565 commits from v1.11.6-statediff-v5 into master
2 changed files with 88 additions and 0 deletions
Showing only changes of commit 51eb5f8ca8

View File

@@ -34,9 +34,11 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/console/prompt"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/olekukonko/tablewriter"
	"gopkg.in/urfave/cli.v1"
)

@@ -69,6 +71,7 @@ Remove blockchain and state databases`,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
		},
	}
	dbInspectCmd = cli.Command{
@@ -233,6 +236,21 @@ WARNING: This is a low-level operation which may cause database corruption!`,
		},
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	dbMetadataCmd = cli.Command{
		Action: utils.MigrateFlags(showMetaData),
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "Shows metadata about the chain status.",
	}
)

func removeDB(ctx *cli.Context) error {
@@ -685,3 +703,50 @@ func exportChaindata(ctx *cli.Context) error {
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}

func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
	}
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (0x%x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
	}
	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
	}...)
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}
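
Since dbMetadataCmd is wired into the db command's Subcommands above, the new view should be reachable roughly as follows once the branch is built (a sketch; the datadir is a placeholder, and the printed values depend entirely on the local chain state):

    $ geth --datadir <datadir> db metadata

tablewriter renders the collected rows as an ASCII table with Field/Value headers: the head block/header pointers first, then the freezer and snapshot bookkeeping entries.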

View File

@@ -66,6 +66,29 @@ type journalStorage struct {
	Vals [][]byte
}

func ParseGeneratorStatus(generatorBlob []byte) string {
	if len(generatorBlob) == 0 {
		return ""
	}
	var generator journalGenerator
	if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
		log.Warn("failed to decode snapshot generator", "err", err)
		return ""
	}
	// Figure out whether we're after or within an account
	var m string
	switch marker := generator.Marker; len(marker) {
	case common.HashLength:
		m = fmt.Sprintf("at %#x", marker)
	case 2 * common.HashLength:
		m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
	default:
		m = fmt.Sprintf("%#x", marker)
	}
	return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
		generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
}

// loadAndParseJournal tries to parse the snapshot journal in latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
	// Retrieve the disk layer generator. It must exist, no matter the
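
The marker handling in ParseGeneratorStatus is the subtle part: a 32-byte marker (common.HashLength) means generation paused after an account, a 64-byte one means it paused within an account's storage, and anything else is printed raw. A hypothetical in-package test, not part of this commit (journalGenerator is unexported, so it would have to sit next to journal.go), round-trips a generator through RLP the same way the journal code does:

package snapshot

import (
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/rlp"
)

// Hypothetical sanity check: a marker of exactly common.HashLength bytes
// should render as "at 0x...".
func TestParseGeneratorStatus(t *testing.T) {
	blob, err := rlp.EncodeToBytes(journalGenerator{
		Done:     false,
		Marker:   make([]byte, 32), // one hash: paused after an account
		Accounts: 100,
		Slots:    500,
		Storage:  4096,
	})
	if err != nil {
		t.Fatal(err)
	}
	status := ParseGeneratorStatus(blob)
	if !strings.Contains(status, "Accounts: 100") || !strings.Contains(status, "Marker: at 0x") {
		t.Fatalf("unexpected status: %s", status)
	}
}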