cmd/utils, eth: relinquish GC cache to read cache in archive mode

Péter Szilágyi 2019-02-05 12:49:59 +02:00
parent 85b3b1c8d6
commit d6225ab846
7 changed files with 24 additions and 17 deletions

@@ -332,12 +332,12 @@ var (
 	}
 	CacheTrieFlag = cli.IntFlag{
 		Name:  "cache.trie",
-		Usage: "Percentage of cache memory allowance to use for trie caching",
+		Usage: "Percentage of cache memory allowance to use for trie caching (default = 25% full mode, 50% archive mode)",
 		Value: 25,
 	}
 	CacheGCFlag = cli.IntFlag{
 		Name:  "cache.gc",
-		Usage: "Percentage of cache memory allowance to use for trie pruning",
+		Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
 		Value: 25,
 	}
 	TrieCacheGenFlag = cli.IntFlag{
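For scale (the budget below is only an illustration, not a value from this commit): with a --cache budget of, say, 4096 MB, the unchanged 25% defaults give a full node roughly 1024 MB for clean trie reads and 1024 MB for the pruning (dirty) cache, while an archive node, per the eth/backend change further down, folds the GC share into reads and ends up with about 2048 MB clean and 0 MB dirty, which is what the new usage strings describe.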

@@ -26,10 +26,10 @@ type StorageSize float64
 // String implements the stringer interface.
 func (s StorageSize) String() string {
-	if s > 1000000 {
-		return fmt.Sprintf("%.2f mB", s/1000000)
-	} else if s > 1000 {
-		return fmt.Sprintf("%.2f kB", s/1000)
+	if s > 1048576 {
+		return fmt.Sprintf("%.2f MiB", s/1048576)
+	} else if s > 1024 {
+		return fmt.Sprintf("%.2f KiB", s/1024)
 	} else {
 		return fmt.Sprintf("%.2f B", s)
 	}
@@ -38,10 +38,10 @@ func (s StorageSize) String() string {
 // TerminalString implements log.TerminalStringer, formatting a string for console
 // output during logging.
 func (s StorageSize) TerminalString() string {
-	if s > 1000000 {
-		return fmt.Sprintf("%.2fmB", s/1000000)
-	} else if s > 1000 {
-		return fmt.Sprintf("%.2fkB", s/1000)
+	if s > 1048576 {
+		return fmt.Sprintf("%.2fMiB", s/1048576)
+	} else if s > 1024 {
+		return fmt.Sprintf("%.2fKiB", s/1024)
 	} else {
 		return fmt.Sprintf("%.2fB", s)
 	}
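The thresholds and divisors move from decimal units (1000, 1000000) to binary ones (1024, 1048576), and the labels from mB/kB to MiB/KiB. A quick sanity check of the new output, done here with plain fmt rather than the StorageSize methods themselves:

package main

import "fmt"

func main() {
	// 2381273 bytes is the value exercised by the test below.
	// Decimal: 2381273 / 1000000 ≈ 2.38 ("2.38 mB" before this change).
	// Binary:  2381273 / 1048576 ≈ 2.27 ("2.27 MiB" after this change).
	var s float64 = 2381273
	fmt.Printf("%.2f MiB\n", s/1048576) // prints "2.27 MiB"

	// Likewise 2192 / 1024 ≈ 2.14, hence "2.14 KiB" in the updated test.
	fmt.Printf("%.2f KiB\n", 2192.0/1024) // prints "2.14 KiB"
}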

@@ -25,8 +25,8 @@ func TestStorageSizeString(t *testing.T) {
 		size StorageSize
 		str  string
 	}{
-		{2381273, "2.38 mB"},
-		{2192, "2.19 kB"},
+		{2381273, "2.27 MiB"},
+		{2192, "2.14 KiB"},
 		{12, "12.00 B"},
 	}

@@ -1253,8 +1253,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
 		stats.processed++
 		stats.usedGas += usedGas
-		cache, _ := bc.stateCache.TrieDB().Size()
-		stats.report(chain, it.index, cache)
+		dirty, _ := bc.stateCache.TrieDB().Size()
+		stats.report(chain, it.index, dirty)
 	}
 	// Any blocks remaining here? The only ones we care about are the future ones
 	if block != nil && err == consensus.ErrFutureBlock {

@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
 // report prints statistics if some number of blocks have been processed
 // or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, cache common.StorageSize) {
+func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) {
 	// Fetch the timings for the batch
 	var (
 		now = mclock.Now()
@@ -63,7 +63,7 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
 	if timestamp := time.Unix(end.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
 		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
 	}
-	context = append(context, []interface{}{"cache", cache}...)
+	context = append(context, []interface{}{"dirty", dirty}...)
 	if st.queued > 0 {
 		context = append(context, []interface{}{"queued", st.queued}...)

@@ -113,6 +113,12 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		log.Warn("Sanitizing invalid miner gas price", "provided", config.MinerGasPrice, "updated", DefaultConfig.MinerGasPrice)
 		config.MinerGasPrice = new(big.Int).Set(DefaultConfig.MinerGasPrice)
 	}
+	if config.NoPruning && config.TrieDirtyCache > 0 {
+		config.TrieCleanCache += config.TrieDirtyCache
+		config.TrieDirtyCache = 0
+	}
+	log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
 	// Assemble the Ethereum object
 	chainDb, err := CreateDB(ctx, config, "chaindata")
 	if err != nil {
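In archive mode nothing is ever pruned, so memory reserved for the dirty (GC) cache would sit idle; the added block hands it to the clean read cache instead. A minimal standalone sketch of that reallocation, using illustrative names rather than the actual config fields:

package main

import "fmt"

// splitTrieCache mirrors the check added to eth.New: when pruning is
// disabled (archive mode), the dirty allowance is folded into the clean
// read cache. The names here are illustrative, not go-ethereum API.
func splitTrieCache(noPruning bool, cleanMB, dirtyMB int) (int, int) {
	if noPruning && dirtyMB > 0 {
		cleanMB += dirtyMB // archive nodes never flush, so a GC cache is wasted
		dirtyMB = 0
	}
	return cleanMB, dirtyMB
}

func main() {
	clean, dirty := splitTrieCache(true, 256, 256)
	fmt.Println(clean, dirty) // 512 0
}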

@@ -25,6 +25,7 @@ import (
 	"sync"
 	"time"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/syndtr/goleveldb/leveldb"
@@ -70,7 +71,7 @@ func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
 	if handles < 16 {
 		handles = 16
 	}
-	logger.Info("Allocated cache and file handles", "cache", cache, "handles", handles)
+	logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles)
 	// Open the db and recover any potential corruptions
 	db, err := leveldb.OpenFile(file, &opt.Options{
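Wrapping the LevelDB cache size in common.StorageSize means the log field is rendered through the (now binary-unit) formatter above rather than as a bare megabyte count; roughly as in this sketch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	cache := 512 // MB handed to LevelDB, as in NewLDBDatabase
	// Before: the log field carried the raw integer, e.g. cache=512.
	// After: the wrapped value stringifies via StorageSize, e.g. "512.00 MiB".
	fmt.Println(common.StorageSize(cache * 1024 * 1024))
}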