all: next batch of log polishes to contextual versions

Péter Szilágyi 2017-02-28 13:35:17 +02:00
parent d4f60d362b
commit e588e0ca2b
No known key found for this signature in database
GPG Key ID: E9AE538CEDF8293D
18 changed files with 203 additions and 242 deletions
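
For orientation, a minimal sketch (not part of this commit) of the contextual logging pattern these hunks converge on: create a logger once with log.New so it carries static context, then pass typed key/value pairs at the call site instead of pre-formatted fmt.Sprintf strings. The peer name and values below are placeholders.

package main

import (
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Attach static context once; every record from this logger carries "peer".
	logger := log.New("peer", "example-node") // placeholder context value

	// Call sites pass typed values; the installed handler decides how to render them.
	logger.Info("Imported new chain segment", "blocks", 42, "elapsed", 1200*time.Millisecond)
	logger.Warn("Sync failed", "err", errors.New("timeout"))

	// Output goes to whatever handler is installed on log.Root(); geth wires
	// this up at startup from its verbosity flags.
}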

View File

@ -32,7 +32,7 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
queryTicker := time.NewTicker(time.Second)
defer queryTicker.Stop()
logger := log.New("hash", tx.Hash().Hex()[:8])
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
if receipt != nil {

View File

@ -262,7 +262,7 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
case err != nil:
logger.Debug("Failed to decode keystore key", "err", err)
case (addr == common.Address{}):
logger.Debug("Failed to decode keystore key", "error", "missing or zero address")
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
default:
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
}

View File

@ -121,7 +121,7 @@ func (hub *LedgerHub) refreshWallets() {
}
// If there are no more wallets or the device is before the next, wrap new wallet
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
wallet := &ledgerWallet{url: &url, info: ledger, logger: log.New("url", url)}
wallet := &ledgerWallet{url: &url, info: ledger, log: log.New("url", url)}
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
wallets = append(wallets, wallet)

View File

@ -124,7 +124,7 @@ type ledgerWallet struct {
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
logger log.Logger // Contextual logger to tag the ledger with its id
log log.Logger // Contextual logger to tag the ledger with its id
}
// URL implements accounts.Wallet, returning the URL of the Ledger device.
@ -222,8 +222,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
// - communication timeout on the Ledger requires a device power cycle to fix
func (w *ledgerWallet) heartbeat() {
w.logger.Debug("Ledger health-check started")
defer w.logger.Debug("Ledger health-check stopped")
w.log.Debug("Ledger health-check started")
defer w.log.Debug("Ledger health-check stopped")
// Execute heartbeat checks until termination or error
var (
@ -262,7 +262,7 @@ func (w *ledgerWallet) heartbeat() {
}
// In case of error, wait for termination
if err != nil {
w.logger.Debug("Ledger health-check failed", "err", err)
w.log.Debug("Ledger health-check failed", "err", err)
errc = <-w.healthQuit
}
errc <- err
@ -350,8 +350,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
// selfDerive is an account derivation loop that upon request attempts to find
// new non-zero accounts.
func (w *ledgerWallet) selfDerive() {
w.logger.Debug("Ledger self-derivation started")
defer w.logger.Debug("Ledger self-derivation stopped")
w.log.Debug("Ledger self-derivation started")
defer w.log.Debug("Ledger self-derivation stopped")
// Execute self-derivations until termination or error
var (
@ -396,7 +396,7 @@ func (w *ledgerWallet) selfDerive() {
// Retrieve the next derived Ethereum account
if nextAddr == (common.Address{}) {
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
w.logger.Warn("Ledger account derivation failed", "err", err)
w.log.Warn("Ledger account derivation failed", "err", err)
break
}
}
@ -407,12 +407,12 @@ func (w *ledgerWallet) selfDerive() {
)
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
if err != nil {
w.logger.Warn("Ledger balance retrieval failed", "err", err)
w.log.Warn("Ledger balance retrieval failed", "err", err)
break
}
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
if err != nil {
w.logger.Warn("Ledger nonce retrieval failed", "err", err)
w.log.Warn("Ledger nonce retrieval failed", "err", err)
break
}
// If the next account is empty, stop self-derivation, but add it nonetheless
@ -432,7 +432,7 @@ func (w *ledgerWallet) selfDerive() {
// Display a log message to the user for new (or previously empty accounts)
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
w.logger.Info("Ledger discovered new account", "address", nextAddr.Hex(), "path", path, "balance", balance, "nonce", nonce)
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
}
// Fetch the next potential account
if !empty {
@ -471,7 +471,7 @@ func (w *ledgerWallet) selfDerive() {
}
// In case of error, wait for termination
if err != nil {
w.logger.Debug("Ledger self-derivation failed", "err", err)
w.log.Debug("Ledger self-derivation failed", "err", err)
errc = <-w.deriveQuit
}
errc <- err
@ -851,7 +851,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
apdu = nil
}
// Send over to the device
w.logger.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
if _, err := w.device.Write(chunk); err != nil {
return nil, err
}
@ -864,7 +864,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
if _, err := io.ReadFull(w.device, chunk); err != nil {
return nil, err
}
w.logger.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
// Make sure the transport header matches
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {

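The Trace lines above wrap the raw USB frames in hexutil.Bytes so the binary data is rendered as 0x-prefixed hex rather than a decimal byte-slice dump. A standalone sketch of the difference (not part of the diff, values made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	chunk := []byte{0x01, 0x01, 0x05, 0x00, 0x2a}

	// A plain []byte renders as decimal values: [1 1 5 0 42]
	fmt.Printf("%v\n", chunk)

	// hexutil.Bytes implements fmt.Stringer, so it renders as 0x010105002a,
	// which is also the form the log formatter picks up.
	fmt.Printf("%v\n", hexutil.Bytes(chunk))
}
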
View File

@ -47,8 +47,6 @@ func StringToHash(s string) Hash { return BytesToHash([]byte(s)) }
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
// Don't use the default 'String' method in case we want to overwrite
// Get the string representation of the underlying hash
func (h Hash) Str() string { return string(h[:]) }
func (h Hash) Bytes() []byte { return h[:] }
@ -144,6 +142,17 @@ func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
func (a Address) Hex() string { return hexutil.Encode(a[:]) }
// String implements the stringer interface and is also used by the logger.
func (a Address) String() string {
return a.Hex()
}
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
// without going through the stringer interface used for logging.
func (a Address) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "%"+string(c), a[:])
}
// Sets the address to the value of b. If b is larger than len(a) it will panic
func (a *Address) SetBytes(b []byte) {
if len(b) > len(a) {

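The String and Format additions above divide the formatting work: the log formatter resolves fmt.Stringer, so addresses show up in their 0x hex form, while explicit fmt verbs are routed through Format and operate on the raw 20 bytes. A hedged sketch of that split (not part of the diff; the address is an arbitrary example):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")

	// The log formatter checks fmt.Stringer, so the address is rendered via
	// String() as 0x5aae... rather than as a raw byte dump.
	log.Info("Ledger discovered new account", "address", addr)

	// Explicit verbs go through Format(), which hands the raw bytes to the
	// verb: %x prints bare hex without the 0x prefix.
	fmt.Printf("%x\n", addr)
}
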
View File

@ -305,7 +305,7 @@ func setError(resp *otto.Object, code int, msg string) {
func throwJSException(msg interface{}) otto.Value {
val, err := otto.ToValue(msg)
if err != nil {
log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
log.Error("Failed to serialize JavaScript exception", "exception", msg, "err", err)
}
panic(val)
}

View File

@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@ -104,6 +105,8 @@ type Chequebook struct {
txhash string // tx hash of last deposit tx
threshold *big.Int // threshold that triggers autodeposit if not nil
buffer *big.Int // buffer to keep on top of balance for fork protection
log log.Logger // contextual logger with the contract address embedded
}
func (self *Chequebook) String() string {
@ -135,11 +138,12 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
owner: transactOpts.From,
contract: chbook,
session: session,
log: log.New("contract", contractAddr),
}
if (contractAddr != common.Address{}) {
self.setBalanceFromBlockChain()
log.Trace(fmt.Sprintf("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()))
self.log.Trace("New chequebook initialised", "owner", self.owner, "balance", self.balance)
}
return
}
@ -147,7 +151,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
func (self *Chequebook) setBalanceFromBlockChain() {
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
if err != nil {
log.Error(fmt.Sprintf("can't get balance: %v", err))
log.Error("Failed to retrieve chequebook balance", "err", err)
} else {
self.balance.Set(balance)
}
@ -160,7 +164,6 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
if err != nil {
return
}
self, _ = NewChequebook(path, common.Address{}, prvKey, backend)
err = json.Unmarshal(data, self)
@ -170,8 +173,7 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
if checkBalance {
self.setBalanceFromBlockChain()
}
log.Trace(fmt.Sprintf("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path))
log.Trace("Loaded chequebook from disk", "path", path)
return
}
@ -226,7 +228,7 @@ func (self *Chequebook) Save() (err error) {
if err != nil {
return err
}
log.Trace(fmt.Sprintf("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path))
self.log.Trace("Saving chequebook to disk", self.path)
return ioutil.WriteFile(self.path, data, os.ModePerm)
}
@ -339,12 +341,12 @@ func (self *Chequebook) deposit(amount *big.Int) (string, error) {
chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
tx, err := chbookRaw.Transfer(depositTransactor)
if err != nil {
log.Warn(fmt.Sprintf("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err))
self.log.Warn("Failed to fund chequebook", "amount", amount, "balance", self.balance, "target", self.buffer, "err", err)
return "", err
}
// assume that transaction is actually successful, we add the amount to balance right away
self.balance.Add(self.balance, amount)
log.Trace(fmt.Sprintf("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer))
self.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", self.balance, "target", self.buffer)
return tx.Hash().Hex(), nil
}
@ -440,6 +442,7 @@ type Inbox struct {
maxUncashed *big.Int // threshold that triggers autocashing
cashed *big.Int // cumulative amount cashed
cheque *Cheque // last cheque, nil if none yet received
log log.Logger // contextual logger with the contract address embedded
}
// NewInbox creates an Inbox. An Inbox is not persisted, the cumulative sum is updated
@ -467,8 +470,9 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
signer: signer,
session: session,
cashed: new(big.Int).Set(common.Big0),
log: log.New("contract", contractAddr),
}
log.Trace(fmt.Sprintf("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)))
self.log.Trace("New chequebook inbox initialized", "beneficiary", self.beneficiary, "signer", hexutil.Bytes(crypto.FromECDSAPub(signer)))
return
}
@ -490,7 +494,7 @@ func (self *Inbox) Stop() {
func (self *Inbox) Cash() (txhash string, err error) {
if self.cheque != nil {
txhash, err = self.cheque.Cash(self.session)
log.Trace(fmt.Sprintf("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()))
self.log.Trace("Cashing in chequebook cheque", "amount", self.cheque.Amount, "beneficiary", self.beneficiary)
self.cashed = self.cheque.Amount
}
return
@ -574,7 +578,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
self.Cash()
}
}
log.Trace(fmt.Sprintf("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed))
self.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed)
}
return amount, err
@ -582,7 +586,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
log.Trace(fmt.Sprintf("verify cheque: %v - sum: %v", self, sum))
log.Trace("Verifying chequebook cheque", "cheque", self, "sum", sum)
if sum == nil {
return nil, fmt.Errorf("invalid amount")
}

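The chequebook and inbox changes above follow the same recipe used across this batch: build a contextual logger in the constructor with log.New("contract", contractAddr), store it on the struct, and let call sites add only call-specific fields. A minimal sketch of that shape (the type below is a simplified stand-in, not the real Chequebook):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// chequebook is a simplified stand-in for the real Chequebook type.
type chequebook struct {
	contractAddr common.Address
	log          log.Logger // contextual logger with the contract address embedded
}

func newChequebook(addr common.Address) *chequebook {
	return &chequebook{
		contractAddr: addr,
		log:          log.New("contract", addr),
	}
}

func (c *chequebook) save(path string) {
	// The contract address is already part of the context, so only the
	// call-specific field needs to be passed.
	c.log.Trace("Saving chequebook to disk", "path", path)
}

func main() {
	newChequebook(common.HexToAddress("0x01")).save("/tmp/chequebook.json")
}
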
View File

@ -127,10 +127,10 @@ func (r *ReleaseService) checker() {
version, err := r.oracle.CurrentVersion(opts)
if err != nil {
if err == bind.ErrNoCode {
log.Debug(fmt.Sprintf("Release oracle not found at %x", r.config.Oracle))
log.Debug("Release oracle not found", "contract", r.config.Oracle)
continue
}
log.Error(fmt.Sprintf("Failed to retrieve current release: %v", err))
log.Error("Failed to retrieve current release", "err", err)
continue
}
// Version was successfully retrieved, notify if newer than ours
@ -143,13 +143,14 @@ func (r *ReleaseService) checker() {
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
separator := strings.Repeat("-", len(warning))
log.Warn(fmt.Sprint(separator))
log.Warn(fmt.Sprint(warning))
log.Warn(fmt.Sprint(howtofix))
log.Warn(fmt.Sprint(separator))
log.Warn(separator)
log.Warn(warning)
log.Warn(howtofix)
log.Warn(separator)
} else {
log.Debug(fmt.Sprintf("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]))
log.Debug("Client seems up to date with upstream",
"local", fmt.Sprintf("v%d.%d.%d-%x", r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4]),
"upstream", fmt.Sprintf("v%d.%d.%d-%x", version.Major, version.Minor, version.Patch, version.Commit[:4]))
}
// If termination was requested, return

View File

@ -170,7 +170,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
for h := range ancestors {
branch += fmt.Sprintf(" O - %x\n |\n", h)
}
log.Info(fmt.Sprint(branch))
log.Warn(branch)
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
}

View File

@ -160,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
// make sure the headerByNumber (if present) is in our current canonical chain
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
bc.SetHead(header.Number.Uint64() - 1)
log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
log.Error("Chain rewind was successful, resuming normal operation")
}
}
}
@ -219,9 +219,9 @@ func (self *BlockChain) loadLastState() error {
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd))
log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd))
log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd))
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
return nil
}
@ -262,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) {
}
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err))
log.Crit("Failed to reset head full block", "err", err)
}
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err))
log.Crit("Failed to reset head fast block", "err", err)
}
bc.loadLastState()
}
@ -286,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
self.currentBlock = block
self.mu.Unlock()
log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4]))
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
return nil
}
@ -390,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
// Prepare the genesis block and reinitialise the chain
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
log.Crit("Failed to write genesis block TD", "err", err)
}
if err := WriteBlock(bc.chainDb, genesis); err != nil {
log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
log.Crit("Failed to write genesis block", "err", err)
}
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
@ -416,8 +416,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
if first > last {
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
}
log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1))
log.Info("Exporting batch of blocks", "count", last-first+1)
for nr := first; nr <= last; nr++ {
block := self.GetBlockByNumber(nr)
@ -445,10 +444,10 @@ func (bc *BlockChain) insert(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
log.Crit(fmt.Sprintf("failed to insert block number: %v", err))
log.Crit("Failed to insert block number", "err", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err))
log.Crit("Failed to insert head block hash", "err", err)
}
bc.currentBlock = block
@ -457,7 +456,7 @@ func (bc *BlockChain) insert(block *types.Block) {
bc.hc.SetCurrentHeader(block.Header())
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err))
log.Crit("Failed to insert head fast block hash", "err", err)
}
bc.currentFastBlock = block
}
@ -588,8 +587,7 @@ func (bc *BlockChain) Stop() {
atomic.StoreInt32(&bc.procInterrupt, 1)
bc.wg.Wait()
log.Info(fmt.Sprint("Chain manager stopped"))
log.Info("Blockchain manager stopped")
}
func (self *BlockChain) procFutureBlocks() {
@ -683,11 +681,11 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
for i := 1; i < len(blockChain); i++ {
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
log.Error(fmt.Sprint(failure.Error()))
return 0, failure
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the block body and receipt imports
@ -734,31 +732,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
errs[index] = fmt.Errorf("failed to write block body: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit(fmt.Sprint(errs[index]))
log.Crit("Failed to write block body", "err", err)
return
}
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit(fmt.Sprint(errs[index]))
log.Crit("Failed to write block receipts", "err", err)
return
}
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit(fmt.Sprint(errs[index]))
log.Crit("Failed to write log blooms", "err", err)
return
}
if err := WriteTransactions(self.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit(fmt.Sprint(errs[index]))
log.Crit("Failed to write individual transactions", "err", err)
return
}
if err := WriteReceipts(self.chainDb, receipts); err != nil {
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
atomic.AddInt32(&failed, 1)
log.Crit(fmt.Sprint(errs[index]))
log.Crit("Failed to write individual receipts", "err", err)
return
}
atomic.AddInt32(&stats.processed, 1)
@ -784,7 +782,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
}
}
if atomic.LoadInt32(&self.procInterrupt) == 1 {
log.Debug(fmt.Sprint("premature abort during receipt chain processing"))
log.Debug("Premature abort during receipts processing")
return 0, nil
}
// Update the head fast sync block if better
@ -792,20 +790,16 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
head := blockChain[len(errs)-1]
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err))
log.Crit("Failed to update head fast block hash", "err", err)
}
self.currentFastBlock = head
}
self.mu.Unlock()
// Report some public statistics so the user has a clue what's going on
first, last := blockChain[0], blockChain[len(blockChain)-1]
ignored := ""
if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
}
log.Info(fmt.Sprintf("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored))
last := blockChain[len(blockChain)-1]
log.Info("Imported new block receipts", "count", stats.processed, "number", last.Number(), "hash", last.Hash(),
"elapsed", common.PrettyDuration(time.Since(start)), "ignored", stats.ignored)
return 0, nil
}
@ -829,10 +823,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
// Irrelevant of the canonical status, write the block itself to the database
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err))
log.Crit("Failed to write block total difficulty", "err", err)
}
if err := WriteBlock(self.chainDb, block); err != nil {
log.Crit(fmt.Sprintf("failed to write block contents: %v", err))
log.Crit("Failed to write block contents", "err", err)
}
// If the total difficulty is higher than our known, add it to the canonical chain
@ -863,11 +857,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
for i := 1; i < len(chain); i++ {
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
log.Error(fmt.Sprint(failure.Error()))
return 0, failure
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
}
}
// Pre-checks passed, start the full block imports
@ -893,7 +887,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
for i, block := range chain {
if atomic.LoadInt32(&self.procInterrupt) == 1 {
log.Debug(fmt.Sprint("Premature abort during block chain processing"))
log.Debug("Premature abort during blocks processing")
break
}
bstart := time.Now()
@ -903,8 +897,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
r := <-nonceResults
nonceChecked[r.index] = true
if !r.valid {
block := chain[r.index]
return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
invalid := chain[r.index]
return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()}
}
}
@ -978,7 +972,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// coalesce logs for later processing
coalescedLogs = append(coalescedLogs, logs...)
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
if err = WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
return i, err
}
@ -990,9 +984,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
switch status {
case CanonStatTy:
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
}})
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
@ -1013,9 +1007,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return i, err
}
case SideStatTy:
log.Trace("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
}})
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainSideEvent{block})
@ -1055,19 +1049,22 @@ func (st *insertStats) report(chain []*types.Block, index int) {
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
start, end := chain[st.lastIndex], chain[index]
txcount := countTransactions(chain[st.lastIndex : index+1])
var hashes, extra string
if st.queued > 0 || st.ignored > 0 {
extra = fmt.Sprintf(" (%d queued %d ignored)", st.queued, st.ignored)
var (
end = chain[index]
txs = countTransactions(chain[st.lastIndex : index+1])
)
context := []interface{}{
"blocks", st.processed, "number", end.Number(), "hash", end.Hash(), "txs", txs,
"mgas", float64(st.usedGas) / 1000000, "elapsed", common.PrettyDuration(elapsed),
"mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
}
if st.processed > 1 {
hashes = fmt.Sprintf("%x… / %x…", start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
} else {
hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra))
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
log.Info("Imported new chain segment", context...)
*st = insertStats{startTime: now, lastIndex: index}
}
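
The rewritten insertStats.report above assembles its key/value pairs in a slice and appends the optional queued/ignored fields before expanding the slice into log.Info. A standalone sketch of that variadic-context pattern (not part of the diff, values made up):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	var (
		processed = 128
		queued    = 3
		ignored   = 0
		elapsed   = 8 * time.Second
	)
	// Mandatory fields go in first...
	context := []interface{}{"blocks", processed, "elapsed", elapsed}

	// ...optional ones are appended only when they carry information.
	if queued > 0 {
		context = append(context, "queued", queued)
	}
	if ignored > 0 {
		context = append(context, "ignored", ignored)
	}
	log.Info("Imported new chain segment", context...)
}
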
@ -1152,19 +1149,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if len(oldChain) > 63 {
logFn = log.Warn
}
logFn("", "msg", log.Lazy{Fn: func() string {
oldLen, newLen := len(oldChain), len(newChain)
newLast, newFirst := newChain[0], newChain[newLen-1]
oldLast, oldFirst := oldChain[0], oldChain[oldLen-1]
return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
oldLen, newLen,
oldFirst.Number(), oldLast.Number(),
oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
newFirst.Number(), newLast.Number(),
newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
}})
logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
var addedTxs types.Transactions
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
@ -1271,12 +1257,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) {
// reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
bc.addBadBlock(block)
log.Error("", "msg", log.Lazy{Fn: func() string {
var receiptString string
for _, receipt := range receipts {
receiptString += fmt.Sprintf("\t%v\n", receipt)
}
return fmt.Sprintf(`
var receiptString string
for _, receipt := range receipts {
receiptString += fmt.Sprintf("\t%v\n", receipt)
}
log.Error(fmt.Sprintf(`
########## BAD BLOCK #########
Chain config: %v
@ -1286,8 +1272,7 @@ Hash: 0x%x
Error: %v
##############################
`, bc.config, block.Number(), block.Hash(), receiptString, err)
}})
`, bc.config, block.Number(), block.Hash(), receiptString, err))
}
// InsertHeaderChain attempts to insert the given header chain in to the local

View File

@ -106,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Crit(fmt.Sprintf("failed to decode block header: %v", err))
log.Crit("Failed to decode block header", "err", err)
}
return header.Number.Uint64()
}
@ -166,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err))
log.Error("Invalid block header RLP", "hash", hash, "err", err)
return nil
}
return header
@ -190,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
}
body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
log.Error("Invalid block body RLP", "hash", hash, "err", err)
return nil
}
return body
@ -208,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
}
td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
log.Error(fmt.Sprintf("invalid block total difficulty RLP for hash %x: %v", hash, err))
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
return nil
}
return td
@ -246,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
}
storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err))
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
return nil
}
receipts := make(types.Receipts, len(storageReceipts))
@ -285,15 +285,15 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co
}
// GetReceipt returns a receipt by hash
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPrefix, txHash[:]...))
func GetReceipt(db ethdb.Database, hash common.Hash) *types.Receipt {
data, _ := db.Get(append(receiptsPrefix, hash[:]...))
if len(data) == 0 {
return nil
}
var receipt types.ReceiptForStorage
err := rlp.DecodeBytes(data, &receipt)
if err != nil {
log.Debug(fmt.Sprint("GetReceipt err:", err))
log.Error("Invalid receipt RLP", "hash", hash, "err", err)
}
return (*types.Receipt)(&receipt)
}
@ -302,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil {
log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err))
log.Crit("Failed to store number to hash mapping", "err", err)
}
return nil
}
@ -310,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
// WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err))
log.Crit("Failed to store last header's hash", "err", err)
}
return nil
}
@ -318,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err))
log.Crit("Failed to store last block's hash", "err", err)
}
return nil
}
@ -326,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
// WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err))
log.Crit("Failed to store last fast block's hash", "err", err)
}
return nil
}
@ -342,13 +342,12 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
encNum := encodeBlockNumber(num)
key := append(blockHashPrefix, hash...)
if err := db.Put(key, encNum); err != nil {
log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err))
log.Crit("Failed to store hash to number mapping", "err", err)
}
key = append(append(headerPrefix, encNum...), hash...)
if err := db.Put(key, data); err != nil {
log.Crit(fmt.Sprintf("failed to store header into database: %v", err))
log.Crit("Failed to store header", "err", err)
}
log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4]))
return nil
}
@ -365,9 +364,8 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil {
log.Crit(fmt.Sprintf("failed to store block body into database: %v", err))
log.Crit("Failed to store block body", "err", err)
}
log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4]))
return nil
}
@ -379,9 +377,8 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
}
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
if err := db.Put(key, data); err != nil {
log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err))
log.Crit("Failed to store block total difficulty", "err", err)
}
log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td))
return nil
}
@ -414,9 +411,8 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
// Store the flattened receipt slice
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, bytes); err != nil {
log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err))
log.Crit("Failed to store block receipts", "err", err)
}
log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4]))
return nil
}
@ -434,7 +430,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
if err != nil {
return err
}
if err := batch.Put(tx.Hash().Bytes(), data); err != nil {
if err = batch.Put(tx.Hash().Bytes(), data); err != nil {
return err
}
// Encode and queue up the transaction metadata for storage
@ -457,7 +453,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err))
log.Crit("Failed to store transactions", "err", err)
}
return nil
}
@ -489,7 +485,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
}
// Write the scheduled data into the database
if err := batch.Write(); err != nil {
log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err))
log.Crit("Failed to store receipts", "err", err)
}
return nil
}
@ -595,7 +591,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
for hash, preimage := range preimages {
if _, err := table.Get(hash.Bytes()); err != nil {
batch.Put(hash.Bytes(), preimage)
hitCount += 1
hitCount++
}
}
preimageCounter.Inc(int64(len(preimages)))
@ -604,7 +600,6 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
if err := batch.Write(); err != nil {
return fmt.Errorf("preimage write fail for block %d: %v", number, err)
}
log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount))
}
return nil
}

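The database_util.go helpers above escalate storage failures to log.Crit, which in geth's log package both emits the record and terminates the process (it replaced the old glog.Fatalf calls), so none of these writers return with a half-written database. A small sketch under that assumption (the error is fabricated):

package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Simulate an unrecoverable database write failure.
	if err := errors.New("disk full"); err != nil {
		// Crit logs at the critical level and then exits the process,
		// mirroring the glog.Fatalf behaviour it replaced.
		log.Crit("Failed to store block body", "err", err)
	}
}
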
View File

@ -117,7 +117,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
}, nil, nil, nil)
if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number"))
log.Info("Genesis block known, writing canonical number")
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil {
return nil, err
@ -146,7 +146,6 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
if err := WriteChainConfig(chainDb, block.Hash(), genesis.ChainConfig); err != nil {
return nil, err
}
return block, nil
}

View File

@ -101,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
if err != nil {
return nil, err
}
log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
log.Warn("Wrote default Ethereum genesis block")
hc.genesisHeader = genesisBlock.Header()
}
@ -154,12 +154,11 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Irrelevant of the canonical status, write the td and header to the database
if err := hc.WriteTd(hash, number, externTd); err != nil {
log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err))
log.Crit("Failed to write header total difficulty", "err", err)
}
if err := WriteHeader(hc.chainDb, header); err != nil {
log.Crit(fmt.Sprintf("failed to write header contents: %v", err))
log.Crit("Failed to write header content", "err", err)
}
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@ -185,15 +184,13 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// Extend the canonical chain with the new header
if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
log.Crit(fmt.Sprintf("failed to insert header number: %v", err))
log.Crit("Failed to insert header number", "err", err)
}
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
log.Crit("Failed to insert head header hash", "err", err)
}
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
status = CanonStatTy
@ -227,11 +224,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
for i := 1; i < len(chain); i++ {
if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() {
// Chain broke ancestry, log a message (programming error) and skip insertion
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash())
log.Error(fmt.Sprint(failure.Error()))
return 0, failure
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
}
}
// Collect some import statistics to report on
@ -316,7 +313,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
for i, header := range chain {
// Short circuit insertion if shutting down
if hc.procInterrupt() {
log.Debug(fmt.Sprint("premature abort during header chain processing"))
log.Debug("Premature abort during headers processing")
break
}
hash := header.Hash()
@ -332,13 +329,9 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
stats.processed++
}
// Report some public statistics so the user has a clue what's going on
first, last := chain[0], chain[len(chain)-1]
ignored := ""
if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
}
log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]))
last := chain[len(chain)-1]
log.Info("Imported new block headers", "count", stats.processed, "number", last.Number, "hash", last.Hash(),
"elapsed", common.PrettyDuration(time.Since(start)), "ignored", stats.ignored)
return 0, nil
}
@ -445,7 +438,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
// SetCurrentHeader sets the current head header of the canonical chain.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
log.Crit("Failed to insert head header hash", "err", err)
}
hc.currentHeader = head
hc.currentHeaderHash = head.Hash()
@ -488,7 +481,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
hc.currentHeaderHash = hc.currentHeader.Hash()
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err))
log.Crit("Failed to reset head header hash", "err", err)
}
}

View File

@ -17,14 +17,12 @@
package core
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
@ -122,8 +120,6 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
receipt.Logs = statedb.GetLogs(tx.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
log.Debug(fmt.Sprint(receipt))
return receipt, gas, err
}

View File

@ -18,7 +18,6 @@ package core
import (
"errors"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -256,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
}
if vmerr != nil {
log.Debug(fmt.Sprint("vm returned with error:", err))
log.Debug("VM returned with error", "err", err)
// The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen. The first
// balance transfer may never fail.

View File

@ -162,15 +162,10 @@ func (pool *TxPool) eventLoop() {
func (pool *TxPool) resetState() {
currentState, err := pool.currentState()
if err != nil {
log.Error(fmt.Sprintf("Failed to get current state: %v", err))
log.Error("Failed reset txpool state", "err", err)
return
}
managedState := state.ManageState(currentState)
if err != nil {
log.Error(fmt.Sprintf("Failed to get managed state: %v", err))
return
}
pool.pendingState = managedState
pool.pendingState = state.ManageState(currentState)
// validate the pool of pending transactions, this will remove
// any transactions that have been included in the block or
@ -192,7 +187,8 @@ func (pool *TxPool) Stop() {
pool.events.Unsubscribe()
close(pool.quit)
pool.wg.Wait()
log.Info(fmt.Sprint("Transaction pool stopped"))
log.Info("Transaction pool stopped")
}
func (pool *TxPool) State() *state.ManagedState {
@ -323,24 +319,19 @@ func (pool *TxPool) add(tx *types.Transaction) error {
// If the transaction is already known, discard it
hash := tx.Hash()
if pool.all[hash] != nil {
return fmt.Errorf("Known transaction: %x", hash[:4])
log.Trace("Discarding already known transaction", "hash", hash)
return fmt.Errorf("known transaction: %x", hash)
}
// Otherwise ensure basic validation passes and queue it up
if err := pool.validateTx(tx); err != nil {
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
invalidTxCounter.Inc(1)
return err
}
pool.enqueueTx(hash, tx)
// Print a log message if low enough level is set
log.Debug("", "msg", log.Lazy{Fn: func() string {
rcpt := "[NEW_CONTRACT]"
if to := tx.To(); to != nil {
rcpt = common.Bytes2Hex(to[:4])
}
from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash)
}})
log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
return nil
}
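
The pooled-transaction debug line above keeps one log.Lazy wrapper: the sender is only recovered from the signature when a Debug record is actually emitted. A sketch of that deferred-evaluation pattern (the helper is a hypothetical stand-in for types.Sender):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// expensiveSender stands in for types.Sender(signer, tx), which performs ECDSA
// public key recovery and is worth skipping when Debug output is filtered out.
func expensiveSender() common.Address {
	return common.HexToAddress("0x0000000000000000000000000000000000000002")
}

func main() {
	log.Debug("Pooled new transaction",
		"hash", common.HexToHash("0x01"),
		"from", log.Lazy{Fn: expensiveSender})
}
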
@ -409,7 +400,6 @@ func (pool *TxPool) Add(tx *types.Transaction) error {
if err != nil {
return err
}
pool.promoteExecutables(state)
return nil
@ -420,19 +410,21 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
pool.mu.Lock()
defer pool.mu.Unlock()
// Add the batch of transaction, tracking the accepted ones
added := 0
for _, tx := range txs {
if err := pool.add(tx); err != nil {
log.Debug(fmt.Sprint("tx error:", err))
if err := pool.add(tx); err == nil {
added++
}
}
state, err := pool.currentState()
if err != nil {
return err
// Only reprocess the internal state if something was actually added
if added > 0 {
state, err := pool.currentState()
if err != nil {
return err
}
pool.promoteExecutables(state)
}
pool.promoteExecutables(state)
return nil
}
@ -513,33 +505,29 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
for addr, list := range pool.queue {
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(state.GetNonce(addr)) {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Removed old queued transaction: %v", tx)
}})
delete(pool.all, tx.Hash())
hash := tx.Hash()
log.Debug("Removed old queued transaction", "hash", hash)
delete(pool.all, hash)
}
// Drop all transactions that are too costly (low balance)
drops, _ := list.Filter(state.GetBalance(addr))
for _, tx := range drops {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Removed unpayable queued transaction: %v", tx)
}})
delete(pool.all, tx.Hash())
hash := tx.Hash()
log.Debug("Removed unpayable queued transaction", "hash", hash)
delete(pool.all, hash)
queuedNofundsCounter.Inc(1)
}
// Gather all executable transactions and promote them
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Promoting queued transaction: %v", tx)
}})
pool.promoteTx(addr, tx.Hash(), tx)
hash := tx.Hash()
log.Debug("Promoting queued transaction", "hash", hash)
pool.promoteTx(addr, hash, tx)
}
// Drop all transactions over the allowed limit
for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx)
}})
delete(pool.all, tx.Hash())
hash := tx.Hash()
log.Debug("Removed cap-exceeding queued transaction", "hash", hash)
delete(pool.all, hash)
queuedRLCounter.Inc(1)
}
queued += uint64(list.Len())
@ -650,25 +638,22 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(nonce) {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Removed old pending transaction: %v", tx)
}})
delete(pool.all, tx.Hash())
hash := tx.Hash()
log.Debug("Removed old pending transaction", "hash", hash)
delete(pool.all, hash)
}
// Drop all transactions that are too costly (low balance), and queue any invalids back for later
drops, invalids := list.Filter(state.GetBalance(addr))
for _, tx := range drops {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Removed unpayable pending transaction: %v", tx)
}})
delete(pool.all, tx.Hash())
hash := tx.Hash()
log.Debug("Removed unpayable pending transaction", "hash", hash)
delete(pool.all, hash)
pendingNofundsCounter.Inc(1)
}
for _, tx := range invalids {
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("Demoting pending transaction: %v", tx)
}})
pool.enqueueTx(tx.Hash(), tx)
hash := tx.Hash()
log.Debug("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
// Delete the entire queue entry if it became empty.
if list.Empty() {

View File

@ -18,7 +18,6 @@ package vm
import (
"crypto/sha256"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -83,7 +82,7 @@ func (c *ecrecover) Run(in []byte) []byte {
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
// make sure the public key is a valid one
if err != nil {
log.Trace(fmt.Sprint("ECRECOVER error: ", err))
log.Trace("ECRECOVER failed", "err", err)
return nil
}

View File

@ -123,13 +123,9 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
}
}()
log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("evm running: %x\n", codehash[:4])
}})
log.Debug("EVM running contract", "hash", codehash[:])
tstart := time.Now()
defer log.Debug("", "msg", log.Lazy{Fn: func() string {
return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
}})
defer log.Debug("EVM finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
// The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during