commit 94c71c171f
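This commit replaces fmt.Sprintf-formatted log calls with structured key-value logging throughout, and attaches contextual loggers to wallets, chequebooks and inboxes so that identifying fields (device URL, contract address) travel with every message. A minimal, self-contained sketch of the call-site change follows; it is illustrative only, assumes the github.com/ethereum/go-ethereum/log package that the diff itself imports, and whether the Trace/Debug lines appear depends on the configured handler and verbosity.

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	err := errors.New("disk full")

	// Old style being removed: the whole message is preformatted, so handlers
	// cannot align or filter on individual fields.
	log.Error(fmt.Sprintf("failed to write genesis block: %v", err))

	// New style: a short, static, capitalised message plus key-value context.
	log.Error("Failed to write genesis block", "err", err)

	// Contextual logger: identifying fields are attached once via log.New and
	// inherited by every call on the derived logger ("0xdeadbeef" is a dummy
	// stand-in for a contract address).
	logger := log.New("contract", "0xdeadbeef")
	logger.Info("Deposited funds to chequebook", "amount", 42)
}

Keeping the message static and pushing the variables into key-value pairs is what lets the terminal formatter align fields and lets downstream handlers filter on them.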
@ -32,7 +32,7 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
|
|||||||
queryTicker := time.NewTicker(time.Second)
|
queryTicker := time.NewTicker(time.Second)
|
||||||
defer queryTicker.Stop()
|
defer queryTicker.Stop()
|
||||||
|
|
||||||
logger := log.New("hash", tx.Hash().Hex()[:8])
|
logger := log.New("hash", tx.Hash())
|
||||||
for {
|
for {
|
||||||
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
||||||
if receipt != nil {
|
if receipt != nil {
|
||||||
|
@ -262,7 +262,7 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
|
|||||||
case err != nil:
|
case err != nil:
|
||||||
logger.Debug("Failed to decode keystore key", "err", err)
|
logger.Debug("Failed to decode keystore key", "err", err)
|
||||||
case (addr == common.Address{}):
|
case (addr == common.Address{}):
|
||||||
logger.Debug("Failed to decode keystore key", "error", "missing or zero address")
|
logger.Debug("Failed to decode keystore key", "err", "missing or zero address")
|
||||||
default:
|
default:
|
||||||
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
|
||||||
}
|
}
|
||||||
|
@ -121,7 +121,7 @@ func (hub *LedgerHub) refreshWallets() {
|
|||||||
}
|
}
|
||||||
// If there are no more wallets or the device is before the next, wrap new wallet
|
// If there are no more wallets or the device is before the next, wrap new wallet
|
||||||
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
if len(hub.wallets) == 0 || hub.wallets[0].URL().Cmp(url) > 0 {
|
||||||
wallet := &ledgerWallet{url: &url, info: ledger, logger: log.New("url", url)}
|
wallet := &ledgerWallet{url: &url, info: ledger, log: log.New("url", url)}
|
||||||
|
|
||||||
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
events = append(events, accounts.WalletEvent{Wallet: wallet, Arrive: true})
|
||||||
wallets = append(wallets, wallet)
|
wallets = append(wallets, wallet)
|
||||||
|
@ -124,7 +124,7 @@ type ledgerWallet struct {
|
|||||||
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
commsLock chan struct{} // Mutex (buf=1) for the USB comms without keeping the state locked
|
||||||
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
stateLock sync.RWMutex // Protects read and write access to the wallet struct fields
|
||||||
|
|
||||||
logger log.Logger // Contextual logger to tag the ledger with its id
|
log log.Logger // Contextual logger to tag the ledger with its id
|
||||||
}
|
}
|
||||||
|
|
||||||
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
// URL implements accounts.Wallet, returning the URL of the Ledger device.
|
||||||
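The field rename above (logger to log) pairs with the hub hunk earlier that builds the logger as log.New("url", url) when the wallet is wrapped. A minimal sketch of that pattern, using a hypothetical demoWallet type in place of ledgerWallet and assuming the same go-ethereum log package:

package main

import "github.com/ethereum/go-ethereum/log"

// demoWallet is a hypothetical stand-in for ledgerWallet: the contextual
// logger is created once from the identifying URL and stored on the struct.
type demoWallet struct {
	url string
	log log.Logger
}

func newDemoWallet(url string) *demoWallet {
	return &demoWallet{url: url, log: log.New("url", url)}
}

func (w *demoWallet) heartbeat() {
	// The "url" pair is added automatically; the call site only supplies the
	// event-specific message and context.
	w.log.Debug("Ledger health-check started")
}

func main() {
	newDemoWallet("ledger://0001:000a:00").heartbeat()
}

Depending on the configured handler and verbosity, Debug output may be discarded by default, so wire up a verbose handler if you want to see the line when running the sketch.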
@ -222,8 +222,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
|
|||||||
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
|
||||||
// - communication timeout on the Ledger requires a device power cycle to fix
|
// - communication timeout on the Ledger requires a device power cycle to fix
|
||||||
func (w *ledgerWallet) heartbeat() {
|
func (w *ledgerWallet) heartbeat() {
|
||||||
w.logger.Debug("Ledger health-check started")
|
w.log.Debug("Ledger health-check started")
|
||||||
defer w.logger.Debug("Ledger health-check stopped")
|
defer w.log.Debug("Ledger health-check stopped")
|
||||||
|
|
||||||
// Execute heartbeat checks until termination or error
|
// Execute heartbeat checks until termination or error
|
||||||
var (
|
var (
|
||||||
@ -262,7 +262,7 @@ func (w *ledgerWallet) heartbeat() {
|
|||||||
}
|
}
|
||||||
// In case of error, wait for termination
|
// In case of error, wait for termination
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.logger.Debug("Ledger health-check failed", "err", err)
|
w.log.Debug("Ledger health-check failed", "err", err)
|
||||||
errc = <-w.healthQuit
|
errc = <-w.healthQuit
|
||||||
}
|
}
|
||||||
errc <- err
|
errc <- err
|
||||||
@ -350,8 +350,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
|
|||||||
// selfDerive is an account derivation loop that upon request attempts to find
|
// selfDerive is an account derivation loop that upon request attempts to find
|
||||||
// new non-zero accounts.
|
// new non-zero accounts.
|
||||||
func (w *ledgerWallet) selfDerive() {
|
func (w *ledgerWallet) selfDerive() {
|
||||||
w.logger.Debug("Ledger self-derivation started")
|
w.log.Debug("Ledger self-derivation started")
|
||||||
defer w.logger.Debug("Ledger self-derivation stopped")
|
defer w.log.Debug("Ledger self-derivation stopped")
|
||||||
|
|
||||||
// Execute self-derivations until termination or error
|
// Execute self-derivations until termination or error
|
||||||
var (
|
var (
|
||||||
@ -396,7 +396,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
// Retrieve the next derived Ethereum account
|
// Retrieve the next derived Ethereum account
|
||||||
if nextAddr == (common.Address{}) {
|
if nextAddr == (common.Address{}) {
|
||||||
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
|
||||||
w.logger.Warn("Ledger account derivation failed", "err", err)
|
w.log.Warn("Ledger account derivation failed", "err", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -407,12 +407,12 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
)
|
)
|
||||||
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.logger.Warn("Ledger balance retrieval failed", "err", err)
|
w.log.Warn("Ledger balance retrieval failed", "err", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.logger.Warn("Ledger nonce retrieval failed", "err", err)
|
w.log.Warn("Ledger nonce retrieval failed", "err", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// If the next account is empty, stop self-derivation, but add it nonetheless
|
// If the next account is empty, stop self-derivation, but add it nonetheless
|
||||||
@ -432,7 +432,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
|
|
||||||
// Display a log message to the user for new (or previously empty accounts)
|
// Display a log message to the user for new (or previously empty accounts)
|
||||||
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
|
||||||
w.logger.Info("Ledger discovered new account", "address", nextAddr.Hex(), "path", path, "balance", balance, "nonce", nonce)
|
w.log.Info("Ledger discovered new account", "address", nextAddr, "path", path, "balance", balance, "nonce", nonce)
|
||||||
}
|
}
|
||||||
// Fetch the next potential account
|
// Fetch the next potential account
|
||||||
if !empty {
|
if !empty {
|
||||||
@ -471,7 +471,7 @@ func (w *ledgerWallet) selfDerive() {
|
|||||||
}
|
}
|
||||||
// In case of error, wait for termination
|
// In case of error, wait for termination
|
||||||
if err != nil {
|
if err != nil {
|
||||||
w.logger.Debug("Ledger self-derivation failed", "err", err)
|
w.log.Debug("Ledger self-derivation failed", "err", err)
|
||||||
errc = <-w.deriveQuit
|
errc = <-w.deriveQuit
|
||||||
}
|
}
|
||||||
errc <- err
|
errc <- err
|
||||||
@ -851,7 +851,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||||||
apdu = nil
|
apdu = nil
|
||||||
}
|
}
|
||||||
// Send over to the device
|
// Send over to the device
|
||||||
w.logger.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
w.log.Trace("Data chunk sent to the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||||
if _, err := w.device.Write(chunk); err != nil {
|
if _, err := w.device.Write(chunk); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -864,7 +864,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
|
|||||||
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
if _, err := io.ReadFull(w.device, chunk); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
w.logger.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
w.log.Trace("Data chunk received from the Ledger", "chunk", hexutil.Bytes(chunk))
|
||||||
|
|
||||||
// Make sure the transport header matches
|
// Make sure the transport header matches
|
||||||
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
|
||||||
|
@ -47,8 +47,6 @@ func StringToHash(s string) Hash { return BytesToHash([]byte(s)) }
|
|||||||
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
|
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
|
||||||
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
|
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
|
||||||
|
|
||||||
// Don't use the default 'String' method in case we want to overwrite
|
|
||||||
|
|
||||||
// Get the string representation of the underlying hash
|
// Get the string representation of the underlying hash
|
||||||
func (h Hash) Str() string { return string(h[:]) }
|
func (h Hash) Str() string { return string(h[:]) }
|
||||||
func (h Hash) Bytes() []byte { return h[:] }
|
func (h Hash) Bytes() []byte { return h[:] }
|
||||||
@ -144,6 +142,17 @@ func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
|
|||||||
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
|
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
|
||||||
func (a Address) Hex() string { return hexutil.Encode(a[:]) }
|
func (a Address) Hex() string { return hexutil.Encode(a[:]) }
|
||||||
|
|
||||||
|
// String implements the stringer interface and is used also by the logger.
|
||||||
|
func (a Address) String() string {
|
||||||
|
return a.Hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format implements fmt.Formatter, forcing the byte slice to be formatted as is,
|
||||||
|
// without going through the stringer interface used for logging.
|
||||||
|
func (a Address) Format(s fmt.State, c rune) {
|
||||||
|
fmt.Fprintf(s, "%"+string(c), a[:])
|
||||||
|
}
|
||||||
|
|
||||||
// Sets the address to the value of b. If b is larger than len(a) it will panic
|
// Sets the address to the value of b. If b is larger than len(a) it will panic
|
||||||
func (a *Address) SetBytes(b []byte) {
|
func (a *Address) SetBytes(b []byte) {
|
||||||
if len(b) > len(a) {
|
if len(b) > len(a) {
|
||||||
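The Address hunk above adds String, which the logger uses, and Format, which keeps the fmt verbs operating on the raw bytes instead of routing through the stringer. A small sketch of the resulting behaviour, assuming go-ethereum's common package at this revision (later versions checksum the hex output, so exact casing may differ):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	addr := common.HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")

	// String (and therefore the logger) yields the 0x-prefixed hex form.
	fmt.Println(addr.String())

	// fmt verbs go through Format and act on the underlying bytes, so %x
	// prints plain hex without the 0x prefix and without calling String.
	fmt.Printf("%x\n", addr)
}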
|
@ -305,7 +305,7 @@ func setError(resp *otto.Object, code int, msg string) {
|
|||||||
func throwJSException(msg interface{}) otto.Value {
|
func throwJSException(msg interface{}) otto.Value {
|
||||||
val, err := otto.ToValue(msg)
|
val, err := otto.ToValue(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
|
log.Error("Failed to serialize JavaScript exception", "exception", msg, "err", err)
|
||||||
}
|
}
|
||||||
panic(val)
|
panic(val)
|
||||||
}
|
}
|
||||||
|
@ -37,6 +37,7 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
|
"github.com/ethereum/go-ethereum/contracts/chequebook/contract"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
@ -104,6 +105,8 @@ type Chequebook struct {
|
|||||||
txhash string // tx hash of last deposit tx
|
txhash string // tx hash of last deposit tx
|
||||||
threshold *big.Int // threshold that triggers autodeposit if not nil
|
threshold *big.Int // threshold that triggers autodeposit if not nil
|
||||||
buffer *big.Int // buffer to keep on top of balance for fork protection
|
buffer *big.Int // buffer to keep on top of balance for fork protection
|
||||||
|
|
||||||
|
log log.Logger // contextual logger with the contract address embedded
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Chequebook) String() string {
|
func (self *Chequebook) String() string {
|
||||||
@ -135,11 +138,12 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
|
|||||||
owner: transactOpts.From,
|
owner: transactOpts.From,
|
||||||
contract: chbook,
|
contract: chbook,
|
||||||
session: session,
|
session: session,
|
||||||
|
log: log.New("contract", contractAddr),
|
||||||
}
|
}
|
||||||
|
|
||||||
if (contractAddr != common.Address{}) {
|
if (contractAddr != common.Address{}) {
|
||||||
self.setBalanceFromBlockChain()
|
self.setBalanceFromBlockChain()
|
||||||
log.Trace(fmt.Sprintf("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()))
|
self.log.Trace("New chequebook initialised", "owner", self.owner, "balance", self.balance)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -147,7 +151,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
|
|||||||
func (self *Chequebook) setBalanceFromBlockChain() {
|
func (self *Chequebook) setBalanceFromBlockChain() {
|
||||||
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
|
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(fmt.Sprintf("can't get balance: %v", err))
|
log.Error("Failed to retrieve chequebook balance", "err", err)
|
||||||
} else {
|
} else {
|
||||||
self.balance.Set(balance)
|
self.balance.Set(balance)
|
||||||
}
|
}
|
||||||
@ -160,7 +164,6 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
self, _ = NewChequebook(path, common.Address{}, prvKey, backend)
|
self, _ = NewChequebook(path, common.Address{}, prvKey, backend)
|
||||||
|
|
||||||
err = json.Unmarshal(data, self)
|
err = json.Unmarshal(data, self)
|
||||||
@ -170,8 +173,7 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
|
|||||||
if checkBalance {
|
if checkBalance {
|
||||||
self.setBalanceFromBlockChain()
|
self.setBalanceFromBlockChain()
|
||||||
}
|
}
|
||||||
|
log.Trace("Loaded chequebook from disk", "path", path)
|
||||||
log.Trace(fmt.Sprintf("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path))
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -226,7 +228,7 @@ func (self *Chequebook) Save() (err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Trace(fmt.Sprintf("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path))
|
self.log.Trace("Saving chequebook to disk", self.path)
|
||||||
|
|
||||||
return ioutil.WriteFile(self.path, data, os.ModePerm)
|
return ioutil.WriteFile(self.path, data, os.ModePerm)
|
||||||
}
|
}
|
||||||
@ -339,12 +341,12 @@ func (self *Chequebook) deposit(amount *big.Int) (string, error) {
|
|||||||
chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
|
chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
|
||||||
tx, err := chbookRaw.Transfer(depositTransactor)
|
tx, err := chbookRaw.Transfer(depositTransactor)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warn(fmt.Sprintf("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err))
|
self.log.Warn("Failed to fund chequebook", "amount", amount, "balance", self.balance, "target", self.buffer, "err", err)
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
// assume that transaction is actually successful, we add the amount to balance right away
|
// assume that transaction is actually successful, we add the amount to balance right away
|
||||||
self.balance.Add(self.balance, amount)
|
self.balance.Add(self.balance, amount)
|
||||||
log.Trace(fmt.Sprintf("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer))
|
self.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", self.balance, "target", self.buffer)
|
||||||
return tx.Hash().Hex(), nil
|
return tx.Hash().Hex(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -440,6 +442,7 @@ type Inbox struct {
|
|||||||
maxUncashed *big.Int // threshold that triggers autocashing
|
maxUncashed *big.Int // threshold that triggers autocashing
|
||||||
cashed *big.Int // cumulative amount cashed
|
cashed *big.Int // cumulative amount cashed
|
||||||
cheque *Cheque // last cheque, nil if none yet received
|
cheque *Cheque // last cheque, nil if none yet received
|
||||||
|
log log.Logger // contextual logger with the contract address embedded
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewInbox creates an Inbox. An Inbox is not persisted, the cumulative sum is updated
|
// NewInbox creates an Inbox. An Inbox is not persisted, the cumulative sum is updated
|
||||||
@ -467,8 +470,9 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
|
|||||||
signer: signer,
|
signer: signer,
|
||||||
session: session,
|
session: session,
|
||||||
cashed: new(big.Int).Set(common.Big0),
|
cashed: new(big.Int).Set(common.Big0),
|
||||||
|
log: log.New("contract", contractAddr),
|
||||||
}
|
}
|
||||||
log.Trace(fmt.Sprintf("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)))
|
self.log.Trace("New chequebook inbox initialized", "beneficiary", self.beneficiary, "signer", hexutil.Bytes(crypto.FromECDSAPub(signer)))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -490,7 +494,7 @@ func (self *Inbox) Stop() {
|
|||||||
func (self *Inbox) Cash() (txhash string, err error) {
|
func (self *Inbox) Cash() (txhash string, err error) {
|
||||||
if self.cheque != nil {
|
if self.cheque != nil {
|
||||||
txhash, err = self.cheque.Cash(self.session)
|
txhash, err = self.cheque.Cash(self.session)
|
||||||
log.Trace(fmt.Sprintf("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()))
|
self.log.Trace("Cashing in chequebook cheque", "amount", self.cheque.Amount, "beneficiary", self.beneficiary)
|
||||||
self.cashed = self.cheque.Amount
|
self.cashed = self.cheque.Amount
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
@ -574,7 +578,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
|
|||||||
self.Cash()
|
self.Cash()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Trace(fmt.Sprintf("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed))
|
self.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed)
|
||||||
}
|
}
|
||||||
|
|
||||||
return amount, err
|
return amount, err
|
||||||
@ -582,7 +586,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
|
|||||||
|
|
||||||
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
|
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
|
||||||
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
|
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
|
||||||
log.Trace(fmt.Sprintf("verify cheque: %v - sum: %v", self, sum))
|
log.Trace("Verifying chequebook cheque", "cheque", self, "sum", sum)
|
||||||
if sum == nil {
|
if sum == nil {
|
||||||
return nil, fmt.Errorf("invalid amount")
|
return nil, fmt.Errorf("invalid amount")
|
||||||
}
|
}
|
||||||
|
@ -127,10 +127,10 @@ func (r *ReleaseService) checker() {
|
|||||||
version, err := r.oracle.CurrentVersion(opts)
|
version, err := r.oracle.CurrentVersion(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == bind.ErrNoCode {
|
if err == bind.ErrNoCode {
|
||||||
log.Debug(fmt.Sprintf("Release oracle not found at %x", r.config.Oracle))
|
log.Debug("Release oracle not found", "contract", r.config.Oracle)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
log.Error(fmt.Sprintf("Failed to retrieve current release: %v", err))
|
log.Error("Failed to retrieve current release", "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// Version was successfully retrieved, notify if newer than ours
|
// Version was successfully retrieved, notify if newer than ours
|
||||||
@ -143,13 +143,14 @@ func (r *ReleaseService) checker() {
|
|||||||
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
|
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
|
||||||
separator := strings.Repeat("-", len(warning))
|
separator := strings.Repeat("-", len(warning))
|
||||||
|
|
||||||
log.Warn(fmt.Sprint(separator))
|
log.Warn(separator)
|
||||||
log.Warn(fmt.Sprint(warning))
|
log.Warn(warning)
|
||||||
log.Warn(fmt.Sprint(howtofix))
|
log.Warn(howtofix)
|
||||||
log.Warn(fmt.Sprint(separator))
|
log.Warn(separator)
|
||||||
} else {
|
} else {
|
||||||
log.Debug(fmt.Sprintf("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
|
log.Debug("Client seems up to date with upstream",
|
||||||
r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]))
|
"local", fmt.Sprintf("v%d.%d.%d-%x", r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4]),
|
||||||
|
"upstream", fmt.Sprintf("v%d.%d.%d-%x", version.Major, version.Minor, version.Patch, version.Commit[:4]))
|
||||||
}
|
}
|
||||||
|
|
||||||
// If termination was requested, return
|
// If termination was requested, return
|
||||||
|
@ -170,7 +170,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
|
|||||||
for h := range ancestors {
|
for h := range ancestors {
|
||||||
branch += fmt.Sprintf(" O - %x\n |\n", h)
|
branch += fmt.Sprintf(" O - %x\n |\n", h)
|
||||||
}
|
}
|
||||||
log.Info(fmt.Sprint(branch))
|
log.Warn(branch)
|
||||||
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
|
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -160,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
|
|||||||
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
|
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
|
||||||
// make sure the headerByNumber (if present) is in our current canonical chain
|
// make sure the headerByNumber (if present) is in our current canonical chain
|
||||||
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
|
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
|
||||||
log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
|
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
|
||||||
bc.SetHead(header.Number.Uint64() - 1)
|
bc.SetHead(header.Number.Uint64() - 1)
|
||||||
log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
|
log.Error("Chain rewind was successful, resuming normal operation")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -219,9 +219,9 @@ func (self *BlockChain) loadLastState() error {
|
|||||||
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
|
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
|
||||||
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
|
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
|
||||||
|
|
||||||
log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd))
|
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
|
||||||
log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd))
|
log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
|
||||||
log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd))
|
log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -262,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err))
|
log.Crit("Failed to reset head full block", "err", err)
|
||||||
}
|
}
|
||||||
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err))
|
log.Crit("Failed to reset head fast block", "err", err)
|
||||||
}
|
}
|
||||||
bc.loadLastState()
|
bc.loadLastState()
|
||||||
}
|
}
|
||||||
@ -286,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
|
|||||||
self.currentBlock = block
|
self.currentBlock = block
|
||||||
self.mu.Unlock()
|
self.mu.Unlock()
|
||||||
|
|
||||||
log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4]))
|
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -390,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
|
|||||||
|
|
||||||
// Prepare the genesis block and reinitialise the chain
|
// Prepare the genesis block and reinitialise the chain
|
||||||
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
|
log.Crit("Failed to write genesis block TD", "err", err)
|
||||||
}
|
}
|
||||||
if err := WriteBlock(bc.chainDb, genesis); err != nil {
|
if err := WriteBlock(bc.chainDb, genesis); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
|
log.Crit("Failed to write genesis block", "err", err)
|
||||||
}
|
}
|
||||||
bc.genesisBlock = genesis
|
bc.genesisBlock = genesis
|
||||||
bc.insert(bc.genesisBlock)
|
bc.insert(bc.genesisBlock)
|
||||||
@ -416,8 +416,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
|
|||||||
if first > last {
|
if first > last {
|
||||||
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
|
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
|
||||||
}
|
}
|
||||||
|
log.Info("Exporting batch of blocks", "count", last-first+1)
|
||||||
log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1))
|
|
||||||
|
|
||||||
for nr := first; nr <= last; nr++ {
|
for nr := first; nr <= last; nr++ {
|
||||||
block := self.GetBlockByNumber(nr)
|
block := self.GetBlockByNumber(nr)
|
||||||
@ -445,10 +444,10 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
|
|
||||||
// Add the block to the canonical chain number scheme and mark as the head
|
// Add the block to the canonical chain number scheme and mark as the head
|
||||||
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to insert block number: %v", err))
|
log.Crit("Failed to insert block number", "err", err)
|
||||||
}
|
}
|
||||||
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err))
|
log.Crit("Failed to insert head block hash", "err", err)
|
||||||
}
|
}
|
||||||
bc.currentBlock = block
|
bc.currentBlock = block
|
||||||
|
|
||||||
@ -457,7 +456,7 @@ func (bc *BlockChain) insert(block *types.Block) {
|
|||||||
bc.hc.SetCurrentHeader(block.Header())
|
bc.hc.SetCurrentHeader(block.Header())
|
||||||
|
|
||||||
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err))
|
log.Crit("Failed to insert head fast block hash", "err", err)
|
||||||
}
|
}
|
||||||
bc.currentFastBlock = block
|
bc.currentFastBlock = block
|
||||||
}
|
}
|
||||||
@ -588,8 +587,7 @@ func (bc *BlockChain) Stop() {
|
|||||||
atomic.StoreInt32(&bc.procInterrupt, 1)
|
atomic.StoreInt32(&bc.procInterrupt, 1)
|
||||||
|
|
||||||
bc.wg.Wait()
|
bc.wg.Wait()
|
||||||
|
log.Info("Blockchain manager stopped")
|
||||||
log.Info(fmt.Sprint("Chain manager stopped"))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *BlockChain) procFutureBlocks() {
|
func (self *BlockChain) procFutureBlocks() {
|
||||||
@ -683,11 +681,11 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
for i := 1; i < len(blockChain); i++ {
|
for i := 1; i < len(blockChain); i++ {
|
||||||
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
|
if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() {
|
||||||
// Chain broke ancestry, log a message (programming error) and skip insertion
|
// Chain broke ancestry, log a message (programming error) and skip insertion
|
||||||
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
|
log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(),
|
||||||
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
|
"prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash())
|
||||||
|
|
||||||
log.Error(fmt.Sprint(failure.Error()))
|
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
|
||||||
return 0, failure
|
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Pre-checks passed, start the block body and receipt imports
|
// Pre-checks passed, start the block body and receipt imports
|
||||||
@ -734,31 +732,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write block body: %v", err)
|
errs[index] = fmt.Errorf("failed to write block body: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
log.Crit(fmt.Sprint(errs[index]))
|
log.Crit("Failed to write block body", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
|
errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
log.Crit(fmt.Sprint(errs[index]))
|
log.Crit("Failed to write block receipts", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
|
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
|
errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
log.Crit(fmt.Sprint(errs[index]))
|
log.Crit("Failed to write log blooms", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteTransactions(self.chainDb, block); err != nil {
|
if err := WriteTransactions(self.chainDb, block); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
|
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
log.Crit(fmt.Sprint(errs[index]))
|
log.Crit("Failed to write individual transactions", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := WriteReceipts(self.chainDb, receipts); err != nil {
|
if err := WriteReceipts(self.chainDb, receipts); err != nil {
|
||||||
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
|
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
|
||||||
atomic.AddInt32(&failed, 1)
|
atomic.AddInt32(&failed, 1)
|
||||||
log.Crit(fmt.Sprint(errs[index]))
|
log.Crit("Failed to write individual receipts", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
atomic.AddInt32(&stats.processed, 1)
|
atomic.AddInt32(&stats.processed, 1)
|
||||||
@ -784,7 +782,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
log.Debug(fmt.Sprint("premature abort during receipt chain processing"))
|
log.Debug("Premature abort during receipts processing")
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
// Update the head fast sync block if better
|
// Update the head fast sync block if better
|
||||||
@ -792,20 +790,16 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
|
|||||||
head := blockChain[len(errs)-1]
|
head := blockChain[len(errs)-1]
|
||||||
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
|
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
|
||||||
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
|
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err))
|
log.Crit("Failed to update head fast block hash", "err", err)
|
||||||
}
|
}
|
||||||
self.currentFastBlock = head
|
self.currentFastBlock = head
|
||||||
}
|
}
|
||||||
self.mu.Unlock()
|
self.mu.Unlock()
|
||||||
|
|
||||||
// Report some public statistics so the user has a clue what's going on
|
// Report some public statistics so the user has a clue what's going on
|
||||||
first, last := blockChain[0], blockChain[len(blockChain)-1]
|
last := blockChain[len(blockChain)-1]
|
||||||
|
log.Info("Imported new block receipts", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
|
||||||
ignored := ""
|
"number", last.Number(), "hash", last.Hash(), "ignored", stats.ignored)
|
||||||
if stats.ignored > 0 {
|
|
||||||
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
|
|
||||||
}
|
|
||||||
log.Info(fmt.Sprintf("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored))
|
|
||||||
|
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
@ -829,10 +823,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
|
|||||||
|
|
||||||
// Irrelevant of the canonical status, write the block itself to the database
|
// Irrelevant of the canonical status, write the block itself to the database
|
||||||
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
|
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err))
|
log.Crit("Failed to write block total difficulty", "err", err)
|
||||||
}
|
}
|
||||||
if err := WriteBlock(self.chainDb, block); err != nil {
|
if err := WriteBlock(self.chainDb, block); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to write block contents: %v", err))
|
log.Crit("Failed to write block contents", "err", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the total difficulty is higher than our known, add it to the canonical chain
|
// If the total difficulty is higher than our known, add it to the canonical chain
|
||||||
@ -863,11 +857,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
for i := 1; i < len(chain); i++ {
|
for i := 1; i < len(chain); i++ {
|
||||||
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
|
if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() {
|
||||||
// Chain broke ancestry, log a message (programming error) and skip insertion
|
// Chain broke ancestry, log a message (programming error) and skip insertion
|
||||||
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
|
log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(),
|
||||||
i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
|
"parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash())
|
||||||
|
|
||||||
log.Error(fmt.Sprint(failure.Error()))
|
return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(),
|
||||||
return 0, failure
|
chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Pre-checks passed, start the full block imports
|
// Pre-checks passed, start the full block imports
|
||||||
@ -893,7 +887,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
|
|
||||||
for i, block := range chain {
|
for i, block := range chain {
|
||||||
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
log.Debug(fmt.Sprint("Premature abort during block chain processing"))
|
log.Debug("Premature abort during blocks processing")
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
bstart := time.Now()
|
bstart := time.Now()
|
||||||
@ -903,8 +897,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
r := <-nonceResults
|
r := <-nonceResults
|
||||||
nonceChecked[r.index] = true
|
nonceChecked[r.index] = true
|
||||||
if !r.valid {
|
if !r.valid {
|
||||||
block := chain[r.index]
|
invalid := chain[r.index]
|
||||||
return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
|
return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -978,7 +972,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
// coalesce logs for later processing
|
// coalesce logs for later processing
|
||||||
coalescedLogs = append(coalescedLogs, logs...)
|
coalescedLogs = append(coalescedLogs, logs...)
|
||||||
|
|
||||||
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
if err = WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
|
||||||
return i, err
|
return i, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -990,9 +984,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
|
|
||||||
switch status {
|
switch status {
|
||||||
case CanonStatTy:
|
case CanonStatTy:
|
||||||
log.Debug("", "msg", log.Lazy{Fn: func() string {
|
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
|
||||||
return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
|
"txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
|
||||||
}})
|
|
||||||
blockInsertTimer.UpdateSince(bstart)
|
blockInsertTimer.UpdateSince(bstart)
|
||||||
events = append(events, ChainEvent{block, block.Hash(), logs})
|
events = append(events, ChainEvent{block, block.Hash(), logs})
|
||||||
|
|
||||||
@ -1013,9 +1007,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
return i, err
|
return i, err
|
||||||
}
|
}
|
||||||
case SideStatTy:
|
case SideStatTy:
|
||||||
log.Trace("", "msg", log.Lazy{Fn: func() string {
|
log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
|
||||||
return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
|
common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
|
||||||
}})
|
|
||||||
blockInsertTimer.UpdateSince(bstart)
|
blockInsertTimer.UpdateSince(bstart)
|
||||||
events = append(events, ChainSideEvent{block})
|
events = append(events, ChainSideEvent{block})
|
||||||
|
|
||||||
@ -1055,19 +1049,22 @@ func (st *insertStats) report(chain []*types.Block, index int) {
|
|||||||
)
|
)
|
||||||
// If we're at the last block of the batch or report period reached, log
|
// If we're at the last block of the batch or report period reached, log
|
||||||
if index == len(chain)-1 || elapsed >= statsReportLimit {
|
if index == len(chain)-1 || elapsed >= statsReportLimit {
|
||||||
start, end := chain[st.lastIndex], chain[index]
|
var (
|
||||||
txcount := countTransactions(chain[st.lastIndex : index+1])
|
end = chain[index]
|
||||||
|
txs = countTransactions(chain[st.lastIndex : index+1])
|
||||||
var hashes, extra string
|
)
|
||||||
if st.queued > 0 || st.ignored > 0 {
|
context := []interface{}{
|
||||||
extra = fmt.Sprintf(" (%d queued %d ignored)", st.queued, st.ignored)
|
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
|
||||||
|
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
|
||||||
|
"number", end.Number(), "hash", end.Hash(),
|
||||||
}
|
}
|
||||||
if st.processed > 1 {
|
if st.queued > 0 {
|
||||||
hashes = fmt.Sprintf("%x… / %x…", start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
|
context = append(context, []interface{}{"queued", st.queued}...)
|
||||||
} else {
|
|
||||||
hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
|
|
||||||
}
|
}
|
||||||
log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra))
|
if st.ignored > 0 {
|
||||||
|
context = append(context, []interface{}{"ignored", st.ignored}...)
|
||||||
|
}
|
||||||
|
log.Info("Imported new chain segment", context...)
|
||||||
|
|
||||||
*st = insertStats{startTime: now, lastIndex: index}
|
*st = insertStats{startTime: now, lastIndex: index}
|
||||||
}
|
}
|
||||||
@ -1152,19 +1149,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
|
|||||||
if len(oldChain) > 63 {
|
if len(oldChain) > 63 {
|
||||||
logFn = log.Warn
|
logFn = log.Warn
|
||||||
}
|
}
|
||||||
logFn("", "msg", log.Lazy{Fn: func() string {
|
logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
|
||||||
oldLen, newLen := len(oldChain), len(newChain)
|
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
|
||||||
newLast, newFirst := newChain[0], newChain[newLen-1]
|
|
||||||
oldLast, oldFirst := oldChain[0], oldChain[oldLen-1]
|
|
||||||
|
|
||||||
return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
|
|
||||||
commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
|
|
||||||
oldLen, newLen,
|
|
||||||
oldFirst.Number(), oldLast.Number(),
|
|
||||||
oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
|
|
||||||
newFirst.Number(), newLast.Number(),
|
|
||||||
newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
|
|
||||||
}})
|
|
||||||
|
|
||||||
var addedTxs types.Transactions
|
var addedTxs types.Transactions
|
||||||
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
|
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
|
||||||
@ -1271,12 +1257,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) {
|
|||||||
// reportBlock logs a bad block error.
|
// reportBlock logs a bad block error.
|
||||||
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
|
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
|
||||||
bc.addBadBlock(block)
|
bc.addBadBlock(block)
|
||||||
log.Error("", "msg", log.Lazy{Fn: func() string {
|
|
||||||
var receiptString string
|
var receiptString string
|
||||||
for _, receipt := range receipts {
|
for _, receipt := range receipts {
|
||||||
receiptString += fmt.Sprintf("\t%v\n", receipt)
|
receiptString += fmt.Sprintf("\t%v\n", receipt)
|
||||||
}
|
}
|
||||||
return fmt.Sprintf(`
|
log.Error(fmt.Sprintf(`
|
||||||
########## BAD BLOCK #########
|
########## BAD BLOCK #########
|
||||||
Chain config: %v
|
Chain config: %v
|
||||||
|
|
||||||
@ -1286,8 +1272,7 @@ Hash: 0x%x
|
|||||||
|
|
||||||
Error: %v
|
Error: %v
|
||||||
##############################
|
##############################
|
||||||
`, bc.config, block.Number(), block.Hash(), receiptString, err)
|
`, bc.config, block.Number(), block.Hash(), receiptString, err))
|
||||||
}})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||||
|
@ -106,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
|
|||||||
}
|
}
|
||||||
header := new(types.Header)
|
header := new(types.Header)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to decode block header: %v", err))
|
log.Crit("Failed to decode block header", "err", err)
|
||||||
}
|
}
|
||||||
return header.Number.Uint64()
|
return header.Number.Uint64()
|
||||||
}
|
}
|
||||||
@ -166,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
|
|||||||
}
|
}
|
||||||
header := new(types.Header)
|
header := new(types.Header)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
||||||
log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err))
|
log.Error("Invalid block header RLP", "hash", hash, "err", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return header
|
return header
|
||||||
@ -190,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
|
|||||||
}
|
}
|
||||||
body := new(types.Body)
|
body := new(types.Body)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
||||||
log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
|
log.Error("Invalid block body RLP", "hash", hash, "err", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return body
|
return body
|
||||||
@ -208,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
|
|||||||
}
|
}
|
||||||
td := new(big.Int)
|
td := new(big.Int)
|
||||||
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
||||||
log.Error(fmt.Sprintf("invalid block total difficulty RLP for hash %x: %v", hash, err))
|
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return td
|
return td
|
||||||
@ -246,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
|
|||||||
}
|
}
|
||||||
storageReceipts := []*types.ReceiptForStorage{}
|
storageReceipts := []*types.ReceiptForStorage{}
|
||||||
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
||||||
log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err))
|
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
receipts := make(types.Receipts, len(storageReceipts))
|
receipts := make(types.Receipts, len(storageReceipts))
|
||||||
@ -285,15 +285,15 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetReceipt returns a receipt by hash
|
// GetReceipt returns a receipt by hash
|
||||||
func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
|
func GetReceipt(db ethdb.Database, hash common.Hash) *types.Receipt {
|
||||||
data, _ := db.Get(append(receiptsPrefix, txHash[:]...))
|
data, _ := db.Get(append(receiptsPrefix, hash[:]...))
|
||||||
if len(data) == 0 {
|
if len(data) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
var receipt types.ReceiptForStorage
|
var receipt types.ReceiptForStorage
|
||||||
err := rlp.DecodeBytes(data, &receipt)
|
err := rlp.DecodeBytes(data, &receipt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug(fmt.Sprint("GetReceipt err:", err))
|
log.Error("Invalid receipt RLP", "hash", hash, "err", err)
|
||||||
}
|
}
|
||||||
return (*types.Receipt)(&receipt)
|
return (*types.Receipt)(&receipt)
|
||||||
}
|
}
|
||||||
@ -302,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
|
|||||||
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
|
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
|
||||||
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
|
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
|
||||||
if err := db.Put(key, hash.Bytes()); err != nil {
|
if err := db.Put(key, hash.Bytes()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err))
|
log.Crit("Failed to store number to hash mapping", "err", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -310,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
|
|||||||
// WriteHeadHeaderHash stores the head header's hash.
|
// WriteHeadHeaderHash stores the head header's hash.
|
||||||
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err))
|
log.Crit("Failed to store last header's hash", "err", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -318,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
|
|||||||
// WriteHeadBlockHash stores the head block's hash.
|
// WriteHeadBlockHash stores the head block's hash.
|
||||||
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err))
|
log.Crit("Failed to store last block's hash", "err", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -326,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
|
|||||||
// WriteHeadFastBlockHash stores the fast head block's hash.
|
// WriteHeadFastBlockHash stores the fast head block's hash.
|
||||||
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
|
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
|
||||||
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
|
if err := db.Put(headFastKey, hash.Bytes()); err != nil {
|
||||||
log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err))
|
log.Crit("Failed to store last fast block's hash", "err", err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -342,13 +342,12 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
 	encNum := encodeBlockNumber(num)
 	key := append(blockHashPrefix, hash...)
 	if err := db.Put(key, encNum); err != nil {
-		log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err))
+		log.Crit("Failed to store hash to number mapping", "err", err)
 	}
 	key = append(append(headerPrefix, encNum...), hash...)
 	if err := db.Put(key, data); err != nil {
-		log.Crit(fmt.Sprintf("failed to store header into database: %v", err))
+		log.Crit("Failed to store header", "err", err)
 	}
-	log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4]))
 	return nil
 }

@ -365,9 +364,8 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
 func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
 	key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
 	if err := db.Put(key, rlp); err != nil {
-		log.Crit(fmt.Sprintf("failed to store block body into database: %v", err))
+		log.Crit("Failed to store block body", "err", err)
 	}
-	log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4]))
 	return nil
 }

@ -379,9 +377,8 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
 	}
 	key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
 	if err := db.Put(key, data); err != nil {
-		log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err))
+		log.Crit("Failed to store block total difficulty", "err", err)
 	}
-	log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td))
 	return nil
 }

@ -414,9 +411,8 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
 	// Store the flattened receipt slice
 	key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
 	if err := db.Put(key, bytes); err != nil {
-		log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err))
+		log.Crit("Failed to store block receipts", "err", err)
 	}
-	log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4]))
 	return nil
 }
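The WriteHeader, WriteBodyRLP, WriteTd and WriteBlockReceipts hunks all build their database keys the same way: a table prefix, the block number from encodeBlockNumber, then the hash (plus a suffix for the total-difficulty entry). A rough sketch of that layout; the prefix/suffix byte values are illustrative placeholders, and the 8-byte big-endian number encoding is an assumption about how encodeBlockNumber behaves:

package main

import (
	"encoding/binary"
	"fmt"
)

// Illustrative placeholders; the real prefix/suffix constants live in database_util.go.
var (
	headerPrefix = []byte("h")
	tdSuffix     = []byte("t")
)

// encodeBlockNumber: a plausible fixed-width big-endian encoding, so keys for
// consecutive block numbers sort in order inside the key-value store.
func encodeBlockNumber(number uint64) []byte {
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, number)
	return enc
}

func main() {
	hash := make([]byte, 32) // stand-in for common.Hash bytes
	num := uint64(4200000)

	headerKey := append(append(headerPrefix, encodeBlockNumber(num)...), hash...)
	tdKey := append(append(append(headerPrefix, encodeBlockNumber(num)...), hash...), tdSuffix...)

	fmt.Printf("header key: %d bytes, td key: %d bytes\n", len(headerKey), len(tdKey))
}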
@ -434,7 +430,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
 		if err != nil {
 			return err
 		}
-		if err := batch.Put(tx.Hash().Bytes(), data); err != nil {
+		if err = batch.Put(tx.Hash().Bytes(), data); err != nil {
 			return err
 		}
 		// Encode and queue up the transaction metadata for storage

@ -457,7 +453,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
 	}
 	// Write the scheduled data into the database
 	if err := batch.Write(); err != nil {
-		log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err))
+		log.Crit("Failed to store transactions", "err", err)
 	}
 	return nil
 }

@ -489,7 +485,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
 	}
 	// Write the scheduled data into the database
 	if err := batch.Write(); err != nil {
-		log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err))
+		log.Crit("Failed to store receipts", "err", err)
 	}
 	return nil
 }
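Both WriteTransactions and WriteReceipts queue every entry into a batch and only hit the database once at the end, logging a Crit if the final flush fails. A minimal sketch of that shape with a hypothetical in-memory batch; the real ethdb.Batch interface is not reproduced here:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

// memBatch is a toy stand-in for an ethdb write batch.
type memBatch struct {
	pending map[string][]byte
}

func (b *memBatch) Put(key, value []byte) error {
	b.pending[string(key)] = value
	return nil
}

func (b *memBatch) Write() error {
	// A real batch would flush everything to disk atomically here.
	fmt.Printf("flushed %d entries\n", len(b.pending))
	return nil
}

func main() {
	batch := &memBatch{pending: make(map[string][]byte)}
	for i := 0; i < 3; i++ {
		if err := batch.Put([]byte{byte(i)}, []byte("payload")); err != nil {
			log.Error("Failed to queue entry", "err", err)
			return
		}
	}
	// Everything is written in one go; a failure here is treated as fatal above.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to store transactions", "err", err)
	}
}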
@ -595,7 +591,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
 	for hash, preimage := range preimages {
 		if _, err := table.Get(hash.Bytes()); err != nil {
 			batch.Put(hash.Bytes(), preimage)
-			hitCount += 1
+			hitCount++
 		}
 	}
 	preimageCounter.Inc(int64(len(preimages)))

@ -604,7 +600,6 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
 		if err := batch.Write(); err != nil {
 			return fmt.Errorf("preimage write fail for block %d: %v", number, err)
 		}
-		log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount))
 	}
 	return nil
 }

@ -117,7 +117,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	}, nil, nil, nil)
 
 	if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
-		log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number"))
+		log.Info("Genesis block known, writing canonical number")
 		err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
 		if err != nil {
 			return nil, err

@ -146,7 +146,6 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	if err := WriteChainConfig(chainDb, block.Hash(), genesis.ChainConfig); err != nil {
 		return nil, err
 	}
 
 	return block, nil
 }
@ -101,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
 		if err != nil {
 			return nil, err
 		}
-		log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
+		log.Warn("Wrote default Ethereum genesis block")
 		hc.genesisHeader = genesisBlock.Header()
 	}
 

@ -154,12 +154,11 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 
 	// Irrelevant of the canonical status, write the td and header to the database
 	if err := hc.WriteTd(hash, number, externTd); err != nil {
-		log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err))
+		log.Crit("Failed to write header total difficulty", "err", err)
 	}
 	if err := WriteHeader(hc.chainDb, header); err != nil {
-		log.Crit(fmt.Sprintf("failed to write header contents: %v", err))
+		log.Crit("Failed to write header content", "err", err)
 	}
 
 	// If the total difficulty is higher than our known, add it to the canonical chain
 	// Second clause in the if statement reduces the vulnerability to selfish mining.
 	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf

@ -185,15 +184,13 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
 			headNumber = headHeader.Number.Uint64() - 1
 			headHeader = hc.GetHeader(headHash, headNumber)
 		}
 
 		// Extend the canonical chain with the new header
 		if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
-			log.Crit(fmt.Sprintf("failed to insert header number: %v", err))
+			log.Crit("Failed to insert header number", "err", err)
 		}
 		if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
-			log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
+			log.Crit("Failed to insert head header hash", "err", err)
 		}
 
 		hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
 
 		status = CanonStatTy
@ -227,11 +224,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 	for i := 1; i < len(chain); i++ {
 		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() {
 			// Chain broke ancestry, log a messge (programming error) and skip insertion
-			failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
-				i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
-
-			log.Error(fmt.Sprint(failure.Error()))
-			return 0, failure
+			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
+				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash())
+
+			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
+				chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
 		}
 	}
 	// Collect some import statistics to report on
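The rewritten branch above still enforces the same invariant: header i must carry header i-1's number plus one and reference i-1 as its parent. A standalone sketch of that check, using hypothetical minimal header values rather than types.Header:

package main

import "fmt"

// miniHeader carries just the fields the contiguity check needs.
type miniHeader struct {
	Number     uint64
	Hash       string
	ParentHash string
}

// checkContiguous mirrors the ancestry test in the hunk above: every element
// must extend the previous one by exactly one block and point at its hash.
func checkContiguous(chain []miniHeader) error {
	for i := 1; i < len(chain); i++ {
		if chain[i].Number != chain[i-1].Number+1 || chain[i].ParentHash != chain[i-1].Hash {
			return fmt.Errorf("non contiguous insert: item %d is #%d, item %d is #%d (parent %s)",
				i-1, chain[i-1].Number, i, chain[i].Number, chain[i].ParentHash)
		}
	}
	return nil
}

func main() {
	chain := []miniHeader{
		{Number: 10, Hash: "0xaa"},
		{Number: 11, Hash: "0xbb", ParentHash: "0xaa"},
		{Number: 13, Hash: "0xcc", ParentHash: "0xbb"}, // gap: should be 12
	}
	fmt.Println(checkContiguous(chain))
}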
@ -316,7 +313,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 	for i, header := range chain {
 		// Short circuit insertion if shutting down
 		if hc.procInterrupt() {
-			log.Debug(fmt.Sprint("premature abort during header chain processing"))
+			log.Debug("Premature abort during headers processing")
 			break
 		}
 		hash := header.Hash()

@ -332,13 +329,9 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
 		stats.processed++
 	}
 	// Report some public statistics so the user has a clue what's going on
-	first, last := chain[0], chain[len(chain)-1]
-
-	ignored := ""
-	if stats.ignored > 0 {
-		ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
-	}
-	log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]))
+	last := chain[len(chain)-1]
+	log.Info("Imported new block headers", "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)),
+		"number", last.Number, "hash", last.Hash(), "ignored", stats.ignored)
 
 	return 0, nil
 }
@ -445,7 +438,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
 // SetCurrentHeader sets the current head header of the canonical chain.
 func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
 	if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
-		log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
+		log.Crit("Failed to insert head header hash", "err", err)
 	}
 	hc.currentHeader = head
 	hc.currentHeaderHash = head.Hash()

@ -488,7 +481,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
 	hc.currentHeaderHash = hc.currentHeader.Hash()
 
 	if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
-		log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err))
+		log.Crit("Failed to reset head header hash", "err", err)
 	}
 }
 
@ -24,7 +24,6 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
 )

@ -134,9 +133,6 @@ func (self *stateObject) markSuicided() {
 		self.onDirty(self.Address())
 		self.onDirty = nil
 	}
-	log.Debug("", "msg", log.Lazy{Fn: func() string {
-		return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
-	}})
 }
 
 func (c *stateObject) touch() {

@ -251,10 +247,6 @@ func (c *stateObject) AddBalance(amount *big.Int) {
 		return
 	}
 	c.SetBalance(new(big.Int).Add(c.Balance(), amount))
-
-	log.Debug("", "msg", log.Lazy{Fn: func() string {
-		return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
-	}})
 }
 
 // SubBalance removes amount from c's balance.

@ -264,10 +256,6 @@ func (c *stateObject) SubBalance(amount *big.Int) {
 		return
 	}
 	c.SetBalance(new(big.Int).Sub(c.Balance(), amount))
-
-	log.Debug("", "msg", log.Lazy{Fn: func() string {
-		return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
-	}})
 }
 
 func (self *stateObject) SetBalance(amount *big.Int) {

@ -410,7 +410,7 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
 	}
 	var data Account
 	if err := rlp.DecodeBytes(enc, &data); err != nil {
-		log.Error(fmt.Sprintf("can't decode object at %x: %v", addr[:], err))
+		log.Error("Failed to decode state object", "addr", addr, "err", err)
 		return nil
 	}
 	// Insert into the live set.

@ -445,9 +445,6 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
 	newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
 	newobj.setNonce(0) // sets the object to dirty
 	if prev == nil {
-		log.Debug("", "msg", log.Lazy{Fn: func() string {
-			return fmt.Sprintf("(+) %x\n", addr)
-		}})
 		self.journal = append(self.journal, createObjectChange{account: &addr})
 	} else {
 		self.journal = append(self.journal, resetObjectChange{prev: prev})

@ -616,7 +613,7 @@ func (s *StateDB) CommitBatch(deleteEmptyObjects bool) (root common.Hash, batch
 	batch = s.db.NewBatch()
 	root, _ = s.commit(batch, deleteEmptyObjects)
 
-	log.Debug(fmt.Sprintf("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads()))
+	log.Debug("Trie cache stats after commit", "misses", trie.CacheMisses(), "unloads", trie.CacheUnloads())
 	return root, batch
 }
 
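Most of the deletions in these state hunks drop log.Lazy debug calls. Lazy wraps a function that is only invoked if the record is actually emitted, so expensive formatting is skipped when Debug is filtered out; the structured calls that replace it pass cheap values directly. A small sketch of the mechanism, with an invented expensiveDump helper:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

// expensiveDump pretends to be costly formatting work (hashing, RLP encoding, ...).
func expensiveDump() string {
	return fmt.Sprintf("%x", make([]byte, 32))
}

func main() {
	// With log.Lazy, expensiveDump only runs if the Debug record is actually
	// written out; if the handler filters Debug away, nothing is computed.
	log.Debug("State object touched", "dump", log.Lazy{Fn: expensiveDump})

	// The replacement style in the diff avoids Lazy entirely by logging values
	// that are already cheap to pass around.
	log.Debug("State object touched", "nonce", 7, "balance", 1000)
}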
@ -17,14 +17,12 @@
 package core
 
 import (
-	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 )
 

@ -122,8 +120,6 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
 	receipt.Logs = statedb.GetLogs(tx.Hash())
 	receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
-
-	log.Debug(fmt.Sprint(receipt))
 
 	return receipt, gas, err
 }
 

@ -18,7 +18,6 @@ package core
 
 import (
 	"errors"
-	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"

@ -256,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
 		ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
 	}
 	if vmerr != nil {
-		log.Debug(fmt.Sprint("vm returned with error:", err))
+		log.Debug("VM returned with error", "err", err)
 		// The only possible consensus-error would be if there wasn't
 		// sufficient balance to make the transfer happen. The first
 		// balance transfer may never fail.
@ -162,15 +162,10 @@ func (pool *TxPool) eventLoop() {
 func (pool *TxPool) resetState() {
 	currentState, err := pool.currentState()
 	if err != nil {
-		log.Error(fmt.Sprintf("Failed to get current state: %v", err))
+		log.Error("Failed reset txpool state", "err", err)
 		return
 	}
-	managedState := state.ManageState(currentState)
-	if err != nil {
-		log.Error(fmt.Sprintf("Failed to get managed state: %v", err))
-		return
-	}
-	pool.pendingState = managedState
+	pool.pendingState = state.ManageState(currentState)
 
 	// validate the pool of pending transactions, this will remove
 	// any transactions that have been included in the block or

@ -192,7 +187,8 @@ func (pool *TxPool) Stop() {
 	pool.events.Unsubscribe()
 	close(pool.quit)
 	pool.wg.Wait()
-	log.Info(fmt.Sprint("Transaction pool stopped"))
+
+	log.Info("Transaction pool stopped")
 }
 
 func (pool *TxPool) State() *state.ManagedState {
@ -323,24 +319,19 @@ func (pool *TxPool) add(tx *types.Transaction) error {
 	// If the transaction is already known, discard it
 	hash := tx.Hash()
 	if pool.all[hash] != nil {
-		return fmt.Errorf("Known transaction: %x", hash[:4])
+		log.Trace("Discarding already known transaction", "hash", hash)
+		return fmt.Errorf("known transaction: %x", hash)
 	}
 	// Otherwise ensure basic validation passes and queue it up
 	if err := pool.validateTx(tx); err != nil {
+		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
 		invalidTxCounter.Inc(1)
 		return err
 	}
 	pool.enqueueTx(hash, tx)
 
 	// Print a log message if low enough level is set
-	log.Debug("", "msg", log.Lazy{Fn: func() string {
-		rcpt := "[NEW_CONTRACT]"
-		if to := tx.To(); to != nil {
-			rcpt = common.Bytes2Hex(to[:4])
-		}
-		from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
-		return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash)
-	}})
+	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
 	return nil
 }
 
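The reworked add() keeps its original control flow (reject known hashes, validate, enqueue) but now leaves a Trace or Debug breadcrumb on every exit path. A condensed sketch of that flow with a hypothetical pool and validator, not the real TxPool:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

type miniPool struct {
	all map[string]bool // known transaction hashes
}

// validate is a stand-in for pool.validateTx; it accepts everything here.
func validate(hash string) error { return nil }

func (p *miniPool) add(hash string) error {
	// Known transactions are discarded quietly at Trace level.
	if p.all[hash] {
		log.Trace("Discarding already known transaction", "hash", hash)
		return fmt.Errorf("known transaction: %s", hash)
	}
	// Invalid ones are also only Trace-worthy; the real pool bumps a counter too.
	if err := validate(hash); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		return err
	}
	p.all[hash] = true
	log.Debug("Pooled new transaction", "hash", hash)
	return nil
}

func main() {
	p := &miniPool{all: make(map[string]bool)}
	fmt.Println(p.add("0xabc"), p.add("0xabc")) // second call hits the known-hash path
}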
@ -409,7 +400,6 @@ func (pool *TxPool) Add(tx *types.Transaction) error {
 	if err != nil {
 		return err
 	}
 
 	pool.promoteExecutables(state)
 
 	return nil

@ -420,19 +410,21 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
 	pool.mu.Lock()
 	defer pool.mu.Unlock()
 
+	// Add the batch of transaction, tracking the accepted ones
+	added := 0
 	for _, tx := range txs {
-		if err := pool.add(tx); err != nil {
-			log.Debug(fmt.Sprint("tx error:", err))
+		if err := pool.add(tx); err == nil {
+			added++
 		}
 	}
-	state, err := pool.currentState()
-	if err != nil {
-		return err
+	// Only reprocess the internal state if something was actually added
+	if added > 0 {
+		state, err := pool.currentState()
+		if err != nil {
+			return err
+		}
+		pool.promoteExecutables(state)
 	}
-
-	pool.promoteExecutables(state)
 
 	return nil
 }
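AddBatch now counts how many transactions were actually accepted and only re-derives the pending state when that count is non-zero, instead of unconditionally fetching it after every batch. A sketch of the same guard with stand-in types; add and promote here are hypothetical simplifications:

package main

import "fmt"

type batchPool struct{}

// add pretends to accept every odd-numbered transaction and reject the rest.
func (p *batchPool) add(i int) error {
	if i%2 == 0 {
		return fmt.Errorf("rejected %d", i)
	}
	return nil
}

func (p *batchPool) promote() { fmt.Println("reprocessing pending state") }

func (p *batchPool) AddBatch(txs []int) {
	added := 0
	for _, tx := range txs {
		if err := p.add(tx); err == nil {
			added++
		}
	}
	// Skip the (comparatively expensive) state lookup and promotion when the
	// whole batch was rejected or duplicated.
	if added > 0 {
		p.promote()
	}
}

func main() {
	p := &batchPool{}
	p.AddBatch([]int{1, 2, 3})
	p.AddBatch([]int{2, 4}) // nothing accepted, promote() is skipped
}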
@ -513,33 +505,29 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
 	for addr, list := range pool.queue {
 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(state.GetNonce(addr)) {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Removed old queued transaction: %v", tx)
-			}})
-			delete(pool.all, tx.Hash())
+			hash := tx.Hash()
+			log.Debug("Removed old queued transaction", "hash", hash)
+			delete(pool.all, hash)
 		}
 		// Drop all transactions that are too costly (low balance)
 		drops, _ := list.Filter(state.GetBalance(addr))
 		for _, tx := range drops {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Removed unpayable queued transaction: %v", tx)
-			}})
-			delete(pool.all, tx.Hash())
+			hash := tx.Hash()
+			log.Debug("Removed unpayable queued transaction", "hash", hash)
+			delete(pool.all, hash)
 			queuedNofundsCounter.Inc(1)
 		}
 		// Gather all executable transactions and promote them
 		for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Promoting queued transaction: %v", tx)
-			}})
-			pool.promoteTx(addr, tx.Hash(), tx)
+			hash := tx.Hash()
+			log.Debug("Promoting queued transaction", "hash", hash)
+			pool.promoteTx(addr, hash, tx)
 		}
 		// Drop all transactions over the allowed limit
 		for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx)
-			}})
-			delete(pool.all, tx.Hash())
+			hash := tx.Hash()
+			log.Debug("Removed cap-exceeding queued transaction", "hash", hash)
+			delete(pool.all, hash)
 			queuedRLCounter.Inc(1)
 		}
 		queued += uint64(list.Len())

@ -650,25 +638,22 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
 
 		// Drop all transactions that are deemed too old (low nonce)
 		for _, tx := range list.Forward(nonce) {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Removed old pending transaction: %v", tx)
-			}})
-			delete(pool.all, tx.Hash())
+			hash := tx.Hash()
+			log.Debug("Removed old pending transaction", "hash", hash)
+			delete(pool.all, hash)
 		}
 		// Drop all transactions that are too costly (low balance), and queue any invalids back for later
 		drops, invalids := list.Filter(state.GetBalance(addr))
 		for _, tx := range drops {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Removed unpayable pending transaction: %v", tx)
-			}})
-			delete(pool.all, tx.Hash())
+			hash := tx.Hash()
+			log.Debug("Removed unpayable pending transaction", "hash", hash)
+			delete(pool.all, hash)
 			pendingNofundsCounter.Inc(1)
 		}
 		for _, tx := range invalids {
-			log.Debug("", "msg", log.Lazy{Fn: func() string {
-				return fmt.Sprintf("Demoting pending transaction: %v", tx)
-			}})
-			pool.enqueueTx(tx.Hash(), tx)
+			hash := tx.Hash()
+			log.Debug("Demoting pending transaction", "hash", hash)
+			pool.enqueueTx(hash, tx)
 		}
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {
@ -18,7 +18,6 @@ package vm
 
 import (
 	"crypto/sha256"
-	"fmt"
 	"math/big"
 
 	"github.com/ethereum/go-ethereum/common"

@ -83,7 +82,7 @@ func (c *ecrecover) Run(in []byte) []byte {
 	pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
 	// make sure the public key is a valid one
 	if err != nil {
-		log.Trace(fmt.Sprint("ECRECOVER error: ", err))
+		log.Trace("ECRECOVER failed", "err", err)
 		return nil
 	}
 
@ -123,13 +123,9 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
 		}
 	}()
 
-	log.Debug("", "msg", log.Lazy{Fn: func() string {
-		return fmt.Sprintf("evm running: %x\n", codehash[:4])
-	}})
+	log.Debug("EVM running contract", "hash", codehash[:])
 	tstart := time.Now()
-	defer log.Debug("", "msg", log.Lazy{Fn: func() string {
-		return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
-	}})
+	defer log.Debug("EVM finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
 
 	// The Interpreter main run loop (contextual). This loop runs until either an
 	// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
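One subtlety with the new one-line defer above: arguments to a deferred call are evaluated when the defer statement executes, so time.Since(tstart) in that form captures a near-zero duration rather than the run time. A deferred closure, as sketched below, evaluates the duration at exit; whether the actual interpreter does this is not visible from the hunk:

package main

import (
	"fmt"
	"time"
)

func run() {
	tstart := time.Now()

	// Evaluated immediately: the duration is captured now, before any work runs.
	defer fmt.Println("eager elapsed:", time.Since(tstart))

	// Evaluated at function exit: the closure reads the clock when it fires.
	defer func() {
		fmt.Println("deferred-closure elapsed:", time.Since(tstart))
	}()

	time.Sleep(50 * time.Millisecond) // stand-in for the EVM run loop
}

func main() {
	run()
}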
@ -10,6 +10,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+	"unicode/utf8"
 )
 
 const (

@ -43,6 +44,13 @@ var locationEnabled uint32
 // padded to to aid in alignment.
 var locationLength uint32
 
+// fieldPadding is a global map with maximum field value lengths seen until now
+// to allow padding log contexts in a bit smarter way.
+var fieldPadding = make(map[string]int)
+
+// fieldPaddingLock is a global mutex protecting the field padding map.
+var fieldPaddingLock sync.RWMutex
+
 type Format interface {
 	Format(r *Record) []byte
 }
@ -163,15 +171,29 @@ func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
 		}
 
 		// XXX: we should probably check that all of your key bytes aren't invalid
+		fieldPaddingLock.RLock()
+		padding := fieldPadding[k]
+		fieldPaddingLock.RUnlock()
+
+		length := utf8.RuneCountInString(v)
+		if padding < length {
+			padding = length
+
+			fieldPaddingLock.Lock()
+			fieldPadding[k] = padding
+			fieldPaddingLock.Unlock()
+		}
 		if color > 0 {
-			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v)
+			fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=", color, k)
 		} else {
 			buf.WriteString(k)
 			buf.WriteByte('=')
-			buf.WriteString(v)
+		}
+		buf.WriteString(v)
+		if i < len(ctx)-2 {
+			buf.Write(bytes.Repeat([]byte{' '}, padding-length))
 		}
 	}
 
 	buf.WriteByte('\n')
 }
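The logfmt change above keeps a per-key high-water mark of value widths and pads each value (except the last pair on the line) so columns line up across successive log lines, guarding the shared map with an RWMutex. A freestanding sketch of that padding logic outside the formatter:

package main

import (
	"fmt"
	"strings"
	"sync"
	"unicode/utf8"
)

var (
	fieldPadding     = make(map[string]int) // widest value seen so far per key
	fieldPaddingLock sync.RWMutex
)

// padValue returns v padded to the widest width ever observed for key,
// updating the high-water mark when v is the new widest value.
func padValue(key, v string) string {
	fieldPaddingLock.RLock()
	padding := fieldPadding[key]
	fieldPaddingLock.RUnlock()

	length := utf8.RuneCountInString(v)
	if padding < length {
		padding = length

		fieldPaddingLock.Lock()
		fieldPadding[key] = padding
		fieldPaddingLock.Unlock()
	}
	return v + strings.Repeat(" ", padding-length)
}

func main() {
	fmt.Printf("err=%s next\n", padValue("err", "disk full"))
	fmt.Printf("err=%s next\n", padValue("err", "EOF")) // padded to len("disk full")
}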